-```
-
-## Arguments
-
-| Argument/Attribute | Type | Default | Description |
-| --- | --- | --- | --- |
-| `partition` | `string` | `default` | The name of the current partition |
-| `name` | `string` | | The name of the policy the will be used to
-interpolate the various policy names |
-
-## See
-
-- [Template Source Code](./index.hbs)
-
----
diff --git a/ui/packages/consul-ui/app/components/consul/node-identity/template/index.hbs b/ui/packages/consul-ui/app/components/consul/node-identity/template/index.hbs
deleted file mode 100644
index 301559b31e4b..000000000000
--- a/ui/packages/consul-ui/app/components/consul/node-identity/template/index.hbs
+++ /dev/null
@@ -1,48 +0,0 @@
-{{!
- Copyright (c) HashiCorp, Inc.
- SPDX-License-Identifier: BUSL-1.1
-}}
-
-{{#if (can "use partitions")~}}
-partition "{{or @partition 'default'}}" {
- {{#if (can "use nspaces")}}
- namespace "default" {
- node "{{@name}}" {
- policy = "write"
- }
- }
- namespace_prefix "" {
- service_prefix "" {
- policy = "read"
- }
- }
- {{else}}
- node "{{@name}}" {
- policy = "write"
- }
- service_prefix "" {
- policy = "read"
- }
- {{/if}}
-}
-{{~else~}}
-{{~#if (can "use nspaces")~}}
-namespace "default" {
- node "{{@name}}" {
- policy = "write"
- }
-}
-namespace_prefix "" {
- service_prefix "" {
- policy = "read"
- }
-}
-{{else}}
-node "{{@name}}" {
- policy = "write"
-}
-service_prefix "" {
- policy = "read"
-}
-{{~/if~}}
-{{~/if~}}
\ No newline at end of file
diff --git a/ui/packages/consul-ui/app/components/consul/node/list/index.hbs b/ui/packages/consul-ui/app/components/consul/node/list/index.hbs
index 6ff04bc817fc..28df85ebcbc1 100644
--- a/ui/packages/consul-ui/app/components/consul/node/list/index.hbs
+++ b/ui/packages/consul-ui/app/components/consul/node/list/index.hbs
@@ -47,7 +47,7 @@ as |item index|>
@value={{item.Address}}
@name="Address"
/>
- {{item.Address}}
+ {{format-ipaddr item.Address}}
diff --git a/ui/packages/consul-ui/app/components/consul/service-identity/template/README.mdx b/ui/packages/consul-ui/app/components/consul/service-identity/template/README.mdx
deleted file mode 100644
index 89eb4cb255d1..000000000000
--- a/ui/packages/consul-ui/app/components/consul/service-identity/template/README.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
-# Consul::ServiceIdentity::Template
-
-The component is a text-only template that represents what a NodeIdentity
-policy looks like. The policy generated here is **not** what is sent back to
-the backend, instead its just a visual representation of what happens in the
-backend when you save a NodeIdentity.
-
-```hbs preview-template
-
-```
-
-## Arguments
-
-| Argument/Attribute | Type | Default | Description |
-| --- | --- | --- | --- |
-| `nspace` | `string` | `default` | The name of the current namespace |
-| `partition` | `string` | `default` | The name of the current partition |
-| `name` | `string` | | The name of the policy the will be used to
-interpolate the various policy names |
-
-## See
-
-- [Template Source Code](./index.hbs)
-
----
diff --git a/ui/packages/consul-ui/app/components/consul/service-identity/template/index.hbs b/ui/packages/consul-ui/app/components/consul/service-identity/template/index.hbs
deleted file mode 100644
index b822316bd598..000000000000
--- a/ui/packages/consul-ui/app/components/consul/service-identity/template/index.hbs
+++ /dev/null
@@ -1,68 +0,0 @@
-{{!
- Copyright (c) HashiCorp, Inc.
- SPDX-License-Identifier: BUSL-1.1
-}}
-
-{{#if (can "use partitions")}}
-partition "{{or @partition 'default'}}" {
- {{#if (can 'use nspaces')}}
- namespace "{{or @nspace 'default'}}" {
- service "{{@name}}" {
- policy = "write"
- }
- service "{{@name}}-sidecar-proxy" {
- policy = "write"
- }
- service_prefix "" {
- policy = "read"
- }
- node_prefix "" {
- policy = "read"
- }
- }
- {{else}}
- service "{{@name}}" {
- policy = "write"
- }
- service "{{@name}}-sidecar-proxy" {
- policy = "write"
- }
- service_prefix "" {
- policy = "read"
- }
- node_prefix "" {
- policy = "read"
- }
- {{/if}}
-}
-{{else}}
-{{#if (can 'use nspaces')}}
-namespace "{{or @nspace 'default'}}" {
- service "{{@name}}" {
- policy = "write"
- }
- service "{{@name}}-sidecar-proxy" {
- policy = "write"
- }
- service_prefix "" {
- policy = "read"
- }
- node_prefix "" {
- policy = "read"
- }
-}
-{{else}}
-service "{{@name}}" {
- policy = "write"
-}
-service "{{@name}}-sidecar-proxy" {
- policy = "write"
-}
-service_prefix "" {
- policy = "read"
-}
-node_prefix "" {
- policy = "read"
-}
-{{/if}}
-{{/if}}
\ No newline at end of file
diff --git a/ui/packages/consul-ui/app/components/consul/service-instance/list/index.hbs b/ui/packages/consul-ui/app/components/consul/service-instance/list/index.hbs
index e341dcf29edf..c0dd5e143b7c 100644
--- a/ui/packages/consul-ui/app/components/consul/service-instance/list/index.hbs
+++ b/ui/packages/consul-ui/app/components/consul/service-instance/list/index.hbs
@@ -102,9 +102,9 @@ as |proxy|}}
`);
+
+ assert.dom('pre').includesText('service ""');
+ assert.dom('pre').includesText('sidecar-proxy');
+ });
+});
diff --git a/ui/packages/consul-ui/tests/steps/assertions/form.js b/ui/packages/consul-ui/tests/steps/assertions/form.js
index 1b7ef6f85f5d..fd31c688e08f 100644
--- a/ui/packages/consul-ui/tests/steps/assertions/form.js
+++ b/ui/packages/consul-ui/tests/steps/assertions/form.js
@@ -13,8 +13,10 @@ export default function (scenario, assert, find, currentPage) {
}
return Object.keys(data).reduce(function (prev, item, i, arr) {
const name = `${obj.prefix || property}[${item}]`;
- const $el = document.querySelector(`[name="${name}"]`);
- const actual = $el.value;
+ const $el =
+ document.querySelector(`[name="${name}"]`) ||
+ document.querySelector(`[aria-label="${name}"]`);
+ const actual = $el.value || $el.textContent;
const expected = data[item];
assert.strictEqual(actual, expected, `Expected settings to be ${expected} was ${actual}`);
}, obj);
diff --git a/ui/packages/consul-ui/tests/steps/interactions/form.js b/ui/packages/consul-ui/tests/steps/interactions/form.js
index bdd14a539616..8b12539fe2a6 100644
--- a/ui/packages/consul-ui/tests/steps/interactions/form.js
+++ b/ui/packages/consul-ui/tests/steps/interactions/form.js
@@ -3,10 +3,47 @@
* SPDX-License-Identifier: BUSL-1.1
*/
+const isClassParentOfElement = function (_class, element) {
+ if (!element || !element.parentElement) {
+ return false;
+ }
+ if (element.parentElement.classList.contains(_class)) {
+ return true;
+ }
+
+ return isClassParentOfElement(_class, element.parentElement);
+};
+
export default function (scenario, find, fillIn, triggerKeyEvent, currentPage) {
const dont = `( don't| shouldn't| can't)?`;
+
+ const fillInCodeEditor = function (page, name, value) {
+ const valueElement = document.querySelector(`[aria-label="${name}"]`);
+
+ const isCodeEditorElement = isClassParentOfElement('cm-editor', valueElement);
+ const isCodeBlockElement = isClassParentOfElement('hds-code-block', valueElement);
+ if (isCodeEditorElement) {
+ const valueBlock = document.createElement('div');
+ valueBlock.innerHTML = value;
+ valueElement.innerHTML = '';
+ valueElement.appendChild(valueBlock);
+ } else {
+ if (isCodeBlockElement) {
+ throw new Error(`The ${name} editor is set to readonly`);
+ }
+
+ return page;
+ }
+
+ return page;
+ };
+
const fillInElement = async function (page, name, value) {
const cm = document.querySelector(`textarea[name="${name}"] + .CodeMirror`);
+ const codeEditor = document.querySelector(`[aria-label="${name}"]`);
+ if (isClassParentOfElement('cm-editor', codeEditor)) {
+ return fillInCodeEditor(page, name, value);
+ }
if (cm) {
if (!cm.CodeMirror.options.readOnly) {
cm.CodeMirror.setValue(value);
@@ -82,6 +119,9 @@ export default function (scenario, find, fillIn, triggerKeyEvent, currentPage) {
return res;
}
)
+ .then(['I fill in code editor "$name" with "$value"'], function (name, value) {
+ return fillInCodeEditor(currentPage(), name, value);
+ })
.then(['I type "$text" into "$selector"'], function (text, selector) {
return fillIn(selector, text);
})
diff --git a/ui/packages/consul-ui/tests/unit/services/code-mirror/linter-test.js b/ui/packages/consul-ui/tests/unit/services/code-mirror/linter-test.js
deleted file mode 100644
index ec7c35002e39..000000000000
--- a/ui/packages/consul-ui/tests/unit/services/code-mirror/linter-test.js
+++ /dev/null
@@ -1,17 +0,0 @@
-/**
- * Copyright (c) HashiCorp, Inc.
- * SPDX-License-Identifier: BUSL-1.1
- */
-
-import { module, test } from 'qunit';
-import { setupTest } from 'ember-qunit';
-
-module('Unit | Service | code mirror/linter', function (hooks) {
- setupTest(hooks);
-
- // Replace this with your real tests.
- test('it exists', function (assert) {
- let service = this.owner.lookup('service:code-mirror/linter');
- assert.ok(service);
- });
-});
diff --git a/ui/packages/consul-ui/tests/unit/utils/process-ip-address-test.js b/ui/packages/consul-ui/tests/unit/utils/process-ip-address-test.js
new file mode 100644
index 000000000000..5ebf5b2642c0
--- /dev/null
+++ b/ui/packages/consul-ui/tests/unit/utils/process-ip-address-test.js
@@ -0,0 +1,45 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: BUSL-1.1
+ */
+
+import { processIpAddress } from 'consul-ui/utils/process-ip-address';
+import { module, test } from 'qunit';
+
+module('Unit | Utility | Process Ip Address', function () {
+ test('Returns as it is for ipv4 and already collapsed', function (assert) {
+ let result = processIpAddress('192.168.1.1');
+ assert.equal(result, '192.168.1.1');
+
+ assert.equal(processIpAddress('255.255.255.255'), '255.255.255.255');
+
+ assert.equal(processIpAddress('2001:db8::ff00:42:8329'), '[2001:db8::ff00:42:8329]');
+
+ assert.equal(processIpAddress('::1'), '[::1]');
+
+ assert.equal(processIpAddress('fe80::202:b3ff:fe1e:8329'), '[fe80::202:b3ff:fe1e:8329]');
+
+ assert.equal(processIpAddress('::'), '[::]');
+ });
+
+ test('Returns null for invalid IP address', function (assert) {
+ assert.equal(processIpAddress('2001::85a3::8a2e:370:7334'), null);
+
+ assert.equal(processIpAddress('2001:db8:0:0:0:0:0:0:1:2'), null);
+ assert.equal(processIpAddress('2001:db8:g::1'), null);
+ assert.equal(processIpAddress('2001:db8:1::2:3:4:5:6'), null);
+ });
+
+ test('Returns collapsed IP address', function (assert) {
+ assert.equal(
+ processIpAddress('2001:0db8:0000:0000:0000:ff00:0042:8329'),
+ '[2001:db8::ff00:42:8329]'
+ );
+
+ assert.equal(processIpAddress('2001:db8:0:0:0:ff00:42:8329'), '[2001:db8::ff00:42:8329]');
+
+ assert.equal(processIpAddress('2001:db8::ff00:42:8329'), '[2001:db8::ff00:42:8329]');
+
+ assert.equal(processIpAddress('fe80::202:b3ff:fe1e:8329'), '[fe80::202:b3ff:fe1e:8329]');
+ });
+});
diff --git a/ui/yarn.lock b/ui/yarn.lock
index 10cbc0221d58..ee5467421975 100644
--- a/ui/yarn.lock
+++ b/ui/yarn.lock
@@ -2,11 +2,6 @@
# yarn lockfile v1
-"@alloc/quick-lru@^5.2.0":
- version "5.2.0"
- resolved "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz#7bf68b20c0a350f936915fcae06f58e32007ce30"
- integrity sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==
-
"@ampproject/remapping@^2.2.0":
version "2.3.0"
resolved "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz#ed441b6fa600072520ce18b43d2c8cc8caecc7f4"
@@ -30,6 +25,15 @@
"@babel/highlight" "^7.24.7"
picocolors "^1.0.0"
+"@babel/code-frame@^7.27.1":
+ version "7.27.1"
+ resolved "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz#200f715e66d52a23b221a9435534a91cc13ad5be"
+ integrity sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==
+ dependencies:
+ "@babel/helper-validator-identifier" "^7.27.1"
+ js-tokens "^4.0.0"
+ picocolors "^1.1.1"
+
"@babel/compat-data@^7.20.5", "@babel/compat-data@^7.22.6", "@babel/compat-data@^7.24.7":
version "7.24.7"
resolved "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.24.7.tgz#d23bbea508c3883ba8251fb4164982c36ea577ed"
@@ -66,6 +70,17 @@
"@jridgewell/trace-mapping" "^0.3.25"
jsesc "^2.5.1"
+"@babel/generator@^7.27.5":
+ version "7.27.5"
+ resolved "https://registry.npmjs.org/@babel/generator/-/generator-7.27.5.tgz#3eb01866b345ba261b04911020cbe22dd4be8c8c"
+ integrity sha512-ZGhA37l0e/g2s1Cnzdix0O3aLYm66eF8aufiVteOgnwxgnRP8GoyMj7VWsgWnQbVKXyge7hqrFh2K2TQM6t1Hw==
+ dependencies:
+ "@babel/parser" "^7.27.5"
+ "@babel/types" "^7.27.3"
+ "@jridgewell/gen-mapping" "^0.3.5"
+ "@jridgewell/trace-mapping" "^0.3.25"
+ jsesc "^3.0.2"
+
"@babel/helper-annotate-as-pure@^7.18.6", "@babel/helper-annotate-as-pure@^7.24.7":
version "7.24.7"
resolved "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.24.7.tgz#5373c7bc8366b12a033b4be1ac13a206c6656aab"
@@ -165,6 +180,14 @@
"@babel/traverse" "^7.24.7"
"@babel/types" "^7.24.7"
+"@babel/helper-module-imports@^7.22.15":
+ version "7.27.1"
+ resolved "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz#7ef769a323e2655e126673bb6d2d6913bbead204"
+ integrity sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==
+ dependencies:
+ "@babel/traverse" "^7.27.1"
+ "@babel/types" "^7.27.1"
+
"@babel/helper-module-imports@^7.24.7", "@babel/helper-module-imports@^7.8.3":
version "7.24.7"
resolved "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz#f2f980392de5b84c3328fc71d38bd81bbb83042b"
@@ -242,11 +265,21 @@
resolved "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.7.tgz#4d2d0f14820ede3b9807ea5fc36dfc8cd7da07f2"
integrity sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==
+"@babel/helper-string-parser@^7.27.1":
+ version "7.27.1"
+ resolved "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz#54da796097ab19ce67ed9f88b47bb2ec49367687"
+ integrity sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==
+
"@babel/helper-validator-identifier@^7.24.7":
version "7.24.7"
resolved "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz#75b889cfaf9e35c2aaf42cf0d72c8e91719251db"
integrity sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==
+"@babel/helper-validator-identifier@^7.27.1":
+ version "7.27.1"
+ resolved "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz#a7054dcc145a967dd4dc8fee845a57c1316c9df8"
+ integrity sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==
+
"@babel/helper-validator-option@^7.24.7":
version "7.24.7"
resolved "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.24.7.tgz#24c3bb77c7a425d1742eec8fb433b5a1b38e62f6"
@@ -262,13 +295,13 @@
"@babel/traverse" "^7.24.7"
"@babel/types" "^7.24.7"
-"@babel/helpers@^7.24.7":
- version "7.24.7"
- resolved "https://registry.npmjs.org/@babel/helpers/-/helpers-7.24.7.tgz#aa2ccda29f62185acb5d42fb4a3a1b1082107416"
- integrity sha512-NlmJJtvcw72yRJRcnCmGvSi+3jDEg8qFu3z0AFoymmzLx5ERVWyzd9kVXr7Th9/8yIJi2Zc6av4Tqz3wFs8QWg==
+"@babel/helpers@7.26.10", "@babel/helpers@^7.24.7":
+ version "7.26.10"
+ resolved "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.10.tgz#6baea3cd62ec2d0c1068778d63cb1314f6637384"
+ integrity sha512-UPYc3SauzZ3JGgj87GgZ89JVdC5dj0AoetR5Bw6wj4niittNyFh6+eOGonYvJ1ao6B8lEa3Q3klS7ADZ53bc5g==
dependencies:
- "@babel/template" "^7.24.7"
- "@babel/types" "^7.24.7"
+ "@babel/template" "^7.26.9"
+ "@babel/types" "^7.26.10"
"@babel/highlight@^7.10.4", "@babel/highlight@^7.24.7":
version "7.24.7"
@@ -285,6 +318,20 @@
resolved "https://registry.npmjs.org/@babel/parser/-/parser-7.24.7.tgz#9a5226f92f0c5c8ead550b750f5608e766c8ce85"
integrity sha512-9uUYRm6OqQrCqQdG1iCBwBPZgN8ciDBro2nIOFaiRz1/BCxaI7CNvQbDHvsArAC7Tw9Hda/B3U+6ui9u4HWXPw==
+"@babel/parser@^7.27.2":
+ version "7.27.5"
+ resolved "https://registry.npmjs.org/@babel/parser/-/parser-7.27.5.tgz#ed22f871f110aa285a6fd934a0efed621d118826"
+ integrity sha512-OsQd175SxWkGlzbny8J3K8TnnDD0N3lrIUtB92xwyRpzaenGZhxDvxN/JgU00U3CDZNj9tPuDJ5H0WS4Nt3vKg==
+ dependencies:
+ "@babel/types" "^7.27.3"
+
+"@babel/parser@^7.27.5", "@babel/parser@^7.27.7":
+ version "7.27.7"
+ resolved "https://registry.npmjs.org/@babel/parser/-/parser-7.27.7.tgz#1687f5294b45039c159730e3b9c1f1b242e425e9"
+ integrity sha512-qnzXzDXdr/po3bOTbTIQZ7+TxNKxpkN5IifVLXS+r7qwynkZfPyjZfE7hCXbo7IoO9TNcSyibgONsf2HauUd3Q==
+ dependencies:
+ "@babel/types" "^7.27.7"
+
"@babel/plugin-bugfix-firefox-class-in-computed-class-key@^7.24.7":
version "7.24.7"
resolved "https://registry.npmjs.org/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.24.7.tgz#fd059fd27b184ea2b4c7e646868a9a381bbc3055"
@@ -325,7 +372,7 @@
"@babel/helper-create-class-features-plugin" "^7.18.6"
"@babel/helper-plugin-utils" "^7.18.6"
-"@babel/plugin-proposal-decorators@^7.13.5", "@babel/plugin-proposal-decorators@^7.16.7", "@babel/plugin-proposal-decorators@^7.20.13":
+"@babel/plugin-proposal-decorators@^7.13.5", "@babel/plugin-proposal-decorators@^7.16.7":
version "7.24.7"
resolved "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.24.7.tgz#7e2dcfeda4a42596b57c4c9de1f5176bbfc532e3"
integrity sha512-RL9GR0pUG5Kc8BUWLNDm2T5OpYwSX15r98I0IkgmRQTXuELq/OynH8xtMTMvTJFjXbMWFVTKtYkTaYQsuAwQlQ==
@@ -375,7 +422,7 @@
resolved "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz#7844f9289546efa9febac2de4cfe358a050bd703"
integrity sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==
-"@babel/plugin-proposal-private-property-in-object@^7.16.5", "@babel/plugin-proposal-private-property-in-object@^7.20.5":
+"@babel/plugin-proposal-private-property-in-object@^7.16.5":
version "7.21.11"
resolved "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.11.tgz#69d597086b6760c4126525cfa154f34631ff272c"
integrity sha512-0QZ8qP/3RLDVBwBFoWAwCtgcDZJVwA5LUJRZU8x2YFfKNuFq161wK3cuGrALu5yiPu+vzwTAg/sMWVNeWeNyaw==
@@ -574,7 +621,7 @@
"@babel/helper-create-class-features-plugin" "^7.24.7"
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-class-static-block@^7.16.7", "@babel/plugin-transform-class-static-block@^7.22.11", "@babel/plugin-transform-class-static-block@^7.24.7":
+"@babel/plugin-transform-class-static-block@^7.16.7", "@babel/plugin-transform-class-static-block@^7.24.7":
version "7.24.7"
resolved "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.24.7.tgz#c82027ebb7010bc33c116d4b5044fbbf8c05484d"
integrity sha512-HMXK3WbBPpZQufbMG4B46A90PkuuhN9vBCb5T8+VAHqvAqvcLi+2cKoukcpmUYkszLhScU3l1iudhrks3DggRQ==
@@ -698,7 +745,7 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-modules-amd@^7.12.1", "@babel/plugin-transform-modules-amd@^7.13.0", "@babel/plugin-transform-modules-amd@^7.20.11", "@babel/plugin-transform-modules-amd@^7.24.7":
+"@babel/plugin-transform-modules-amd@^7.12.1", "@babel/plugin-transform-modules-amd@^7.13.0", "@babel/plugin-transform-modules-amd@^7.24.7":
version "7.24.7"
resolved "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.24.7.tgz#65090ed493c4a834976a3ca1cde776e6ccff32d7"
integrity sha512-9+pB1qxV3vs/8Hdmz/CulFB8w2tuu6EB94JZFsjdqxQokwGa9Unap7Bo2gGBGIvPmDIVvQrom7r5m/TCDMURhg==
@@ -901,7 +948,7 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-typescript@^7.13.0", "@babel/plugin-transform-typescript@^7.20.13":
+"@babel/plugin-transform-typescript@^7.13.0":
version "7.24.7"
resolved "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.24.7.tgz#b006b3e0094bf0813d505e0c5485679eeaf4a881"
integrity sha512-iLD3UNkgx2n/HrjBesVbYX6j0yqn/sJktvbtKKgcaLIQ4bTTQ8obAypc1VpyHPD2y4Phh9zHOaAt8e/L14wCpw==
@@ -976,7 +1023,7 @@
core-js "^2.6.5"
regenerator-runtime "^0.13.4"
-"@babel/preset-env@^7.10.2", "@babel/preset-env@^7.16.5", "@babel/preset-env@^7.16.7", "@babel/preset-env@^7.20.2":
+"@babel/preset-env@^7.10.2", "@babel/preset-env@^7.16.5", "@babel/preset-env@^7.16.7":
version "7.24.7"
resolved "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.24.7.tgz#ff067b4e30ba4a72f225f12f123173e77b987f37"
integrity sha512-1YZNsc+y6cTvWlDHidMBsQZrZfEFjRIo/BZCT906PMdzOyXtSLTgqGdrpcuTDCXyd11Am5uQULtDIcCfnTc8fQ==
@@ -1077,17 +1124,10 @@
resolved "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz#f0ba69b075e1f05fb2825b7fad991e7adbb18310"
integrity sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==
-"@babel/runtime@7.12.18":
- version "7.12.18"
- resolved "https://registry.npmjs.org/@babel/runtime/-/runtime-7.12.18.tgz#af137bd7e7d9705a412b3caaf991fe6aaa97831b"
- integrity sha512-BogPQ7ciE6SYAUPtlm9tWbgI9+2AgqSam6QivMgXgAT+fKbgppaj4ZX15MHeLC1PVF5sNk70huBu20XxWOs8Cg==
- dependencies:
- regenerator-runtime "^0.13.4"
-
-"@babel/runtime@^7.12.5", "@babel/runtime@^7.17.8", "@babel/runtime@^7.8.4":
- version "7.24.7"
- resolved "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.7.tgz#f4f0d5530e8dbdf59b3451b9b3e594b6ba082e12"
- integrity sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw==
+"@babel/runtime@7.12.18", "@babel/runtime@8.0.0-alpha.17", "@babel/runtime@^7.12.5", "@babel/runtime@^7.17.8", "@babel/runtime@^7.8.4":
+ version "8.0.0-alpha.17"
+ resolved "https://registry.npmjs.org/@babel/runtime/-/runtime-8.0.0-alpha.17.tgz#325cdc17591b6b0e96ff6d07a136eb0a73022f14"
+ integrity sha512-jeV3fYCLTbEwor7EBzOxhZbW+bxHJpm0V0xhaHGfWQwjsHENO2RBHVxFRTG2zfczCgOpz6TqP7EXVSUaooex6g==
dependencies:
regenerator-runtime "^0.14.0"
@@ -1100,6 +1140,15 @@
"@babel/parser" "^7.24.7"
"@babel/types" "^7.24.7"
+"@babel/template@^7.26.9", "@babel/template@^7.27.2":
+ version "7.27.2"
+ resolved "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz#fa78ceed3c4e7b63ebf6cb39e5852fca45f6809d"
+ integrity sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==
+ dependencies:
+ "@babel/code-frame" "^7.27.1"
+ "@babel/parser" "^7.27.2"
+ "@babel/types" "^7.27.1"
+
"@babel/traverse@^7.1.6", "@babel/traverse@^7.12.1", "@babel/traverse@^7.24.7", "@babel/traverse@^7.4.5", "@babel/traverse@^7.7.0":
version "7.24.7"
resolved "https://registry.npmjs.org/@babel/traverse/-/traverse-7.24.7.tgz#de2b900163fa741721ba382163fe46a936c40cf5"
@@ -1116,6 +1165,19 @@
debug "^4.3.1"
globals "^11.1.0"
+"@babel/traverse@^7.27.1":
+ version "7.27.7"
+ resolved "https://registry.npmjs.org/@babel/traverse/-/traverse-7.27.7.tgz#8355c39be6818362eace058cf7f3e25ac2ec3b55"
+ integrity sha512-X6ZlfR/O/s5EQ/SnUSLzr+6kGnkg8HXGMzpgsMsrJVcfDtH1vIp6ctCN4eZ1LS5c0+te5Cb6Y514fASjMRJ1nw==
+ dependencies:
+ "@babel/code-frame" "^7.27.1"
+ "@babel/generator" "^7.27.5"
+ "@babel/parser" "^7.27.7"
+ "@babel/template" "^7.27.2"
+ "@babel/types" "^7.27.7"
+ debug "^4.3.1"
+ globals "^11.1.0"
+
"@babel/types@^7.1.6", "@babel/types@^7.12.1", "@babel/types@^7.12.13", "@babel/types@^7.24.7", "@babel/types@^7.4.4", "@babel/types@^7.7.0", "@babel/types@^7.7.2":
version "7.24.7"
resolved "https://registry.npmjs.org/@babel/types/-/types-7.24.7.tgz#6027fe12bc1aa724cd32ab113fb7f1988f1f66f2"
@@ -1125,6 +1187,22 @@
"@babel/helper-validator-identifier" "^7.24.7"
to-fast-properties "^2.0.0"
+"@babel/types@^7.26.10", "@babel/types@^7.27.1", "@babel/types@^7.27.3":
+ version "7.27.6"
+ resolved "https://registry.npmjs.org/@babel/types/-/types-7.27.6.tgz#a434ca7add514d4e646c80f7375c0aa2befc5535"
+ integrity sha512-ETyHEk2VHHvl9b9jZP5IHPavHYk57EhanlRRuae9XCpb/j5bDCbPPMOBfCWhnl/7EDJz0jEMCi/RhccCE8r1+Q==
+ dependencies:
+ "@babel/helper-string-parser" "^7.27.1"
+ "@babel/helper-validator-identifier" "^7.27.1"
+
+"@babel/types@^7.27.7":
+ version "7.27.7"
+ resolved "https://registry.npmjs.org/@babel/types/-/types-7.27.7.tgz#40eabd562049b2ee1a205fa589e629f945dce20f"
+ integrity sha512-8OLQgDScAOHXnAz2cV+RfzzNMipuLVBz2biuAJFMV9bfkNf393je3VM8CLkjQodW5+iWsSJdSgSWT6rsZoXHPw==
+ dependencies:
+ "@babel/helper-string-parser" "^7.27.1"
+ "@babel/helper-validator-identifier" "^7.27.1"
+
"@cnakazawa/watch@^1.0.3":
version "1.0.4"
resolved "https://registry.npmjs.org/@cnakazawa/watch/-/watch-1.0.4.tgz#f864ae85004d0fcab6f50be9141c4da368d1656a"
@@ -1133,25 +1211,172 @@
exec-sh "^0.3.2"
minimist "^1.2.0"
+"@codemirror/autocomplete@^6.0.0", "@codemirror/autocomplete@^6.7.1":
+ version "6.18.6"
+ resolved "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.6.tgz#de26e864a1ec8192a1b241eb86addbb612964ddb"
+ integrity sha512-PHHBXFomUs5DF+9tCOM/UoW6XQ4R44lLNNhRaW9PKPTU0D7lIjRg3ElxaJnTwsl/oHiR93WSXDBrekhoUGCPtg==
+ dependencies:
+ "@codemirror/language" "^6.0.0"
+ "@codemirror/state" "^6.0.0"
+ "@codemirror/view" "^6.17.0"
+ "@lezer/common" "^1.0.0"
+
+"@codemirror/commands@^6.8.0":
+ version "6.8.1"
+ resolved "https://registry.npmjs.org/@codemirror/commands/-/commands-6.8.1.tgz#639f5559d2f33f2582a2429c58cb0c1b925c7a30"
+ integrity sha512-KlGVYufHMQzxbdQONiLyGQDUW0itrLZwq3CcY7xpv9ZLRHqzkBSoteocBHtMCoY7/Ci4xhzSrToIeLg7FxHuaw==
+ dependencies:
+ "@codemirror/language" "^6.0.0"
+ "@codemirror/state" "^6.4.0"
+ "@codemirror/view" "^6.27.0"
+ "@lezer/common" "^1.1.0"
+
+"@codemirror/lang-css@^6.0.0":
+ version "6.3.1"
+ resolved "https://registry.npmjs.org/@codemirror/lang-css/-/lang-css-6.3.1.tgz#763ca41aee81bb2431be55e3cfcc7cc8e91421a3"
+ integrity sha512-kr5fwBGiGtmz6l0LSJIbno9QrifNMUusivHbnA1H6Dmqy4HZFte3UAICix1VuKo0lMPKQr2rqB+0BkKi/S3Ejg==
+ dependencies:
+ "@codemirror/autocomplete" "^6.0.0"
+ "@codemirror/language" "^6.0.0"
+ "@codemirror/state" "^6.0.0"
+ "@lezer/common" "^1.0.2"
+ "@lezer/css" "^1.1.7"
+
+"@codemirror/lang-go@^6.0.1":
+ version "6.0.1"
+ resolved "https://registry.npmjs.org/@codemirror/lang-go/-/lang-go-6.0.1.tgz#598222c90f56eae28d11069c612ca64d0306b057"
+ integrity sha512-7fNvbyNylvqCphW9HD6WFnRpcDjr+KXX/FgqXy5H5ZS0eC5edDljukm/yNgYkwTsgp2busdod50AOTIy6Jikfg==
+ dependencies:
+ "@codemirror/autocomplete" "^6.0.0"
+ "@codemirror/language" "^6.6.0"
+ "@codemirror/state" "^6.0.0"
+ "@lezer/common" "^1.0.0"
+ "@lezer/go" "^1.0.0"
+
+"@codemirror/lang-html@^6.0.0":
+ version "6.4.9"
+ resolved "https://registry.npmjs.org/@codemirror/lang-html/-/lang-html-6.4.9.tgz#d586f2cc9c341391ae07d1d7c545990dfa069727"
+ integrity sha512-aQv37pIMSlueybId/2PVSP6NPnmurFDVmZwzc7jszd2KAF8qd4VBbvNYPXWQq90WIARjsdVkPbw29pszmHws3Q==
+ dependencies:
+ "@codemirror/autocomplete" "^6.0.0"
+ "@codemirror/lang-css" "^6.0.0"
+ "@codemirror/lang-javascript" "^6.0.0"
+ "@codemirror/language" "^6.4.0"
+ "@codemirror/state" "^6.0.0"
+ "@codemirror/view" "^6.17.0"
+ "@lezer/common" "^1.0.0"
+ "@lezer/css" "^1.1.0"
+ "@lezer/html" "^1.3.0"
+
+"@codemirror/lang-javascript@^6.0.0", "@codemirror/lang-javascript@^6.2.2":
+ version "6.2.4"
+ resolved "https://registry.npmjs.org/@codemirror/lang-javascript/-/lang-javascript-6.2.4.tgz#eef2227d1892aae762f3a0f212f72bec868a02c5"
+ integrity sha512-0WVmhp1QOqZ4Rt6GlVGwKJN3KW7Xh4H2q8ZZNGZaP6lRdxXJzmjm4FqvmOojVj6khWJHIb9sp7U/72W7xQgqAA==
+ dependencies:
+ "@codemirror/autocomplete" "^6.0.0"
+ "@codemirror/language" "^6.6.0"
+ "@codemirror/lint" "^6.0.0"
+ "@codemirror/state" "^6.0.0"
+ "@codemirror/view" "^6.17.0"
+ "@lezer/common" "^1.0.0"
+ "@lezer/javascript" "^1.0.0"
+
+"@codemirror/lang-json@^6.0.1":
+ version "6.0.2"
+ resolved "https://registry.npmjs.org/@codemirror/lang-json/-/lang-json-6.0.2.tgz#054b160671306667e25d80385286049841836179"
+ integrity sha512-x2OtO+AvwEHrEwR0FyyPtfDUiloG3rnVTSZV1W8UteaLL8/MajQd8DpvUb2YVzC+/T18aSDv0H9mu+xw0EStoQ==
+ dependencies:
+ "@codemirror/language" "^6.0.0"
+ "@lezer/json" "^1.0.0"
+
+"@codemirror/lang-markdown@^6.3.2":
+ version "6.3.3"
+ resolved "https://registry.npmjs.org/@codemirror/lang-markdown/-/lang-markdown-6.3.3.tgz#457f93bd8a2d422dae0625b20b61adf5c6d23def"
+ integrity sha512-1fn1hQAPWlSSMCvnF810AkhWpNLkJpl66CRfIy3vVl20Sl4NwChkorCHqpMtNbXr1EuMJsrDnhEpjZxKZ2UX3A==
+ dependencies:
+ "@codemirror/autocomplete" "^6.7.1"
+ "@codemirror/lang-html" "^6.0.0"
+ "@codemirror/language" "^6.3.0"
+ "@codemirror/state" "^6.0.0"
+ "@codemirror/view" "^6.0.0"
+ "@lezer/common" "^1.2.1"
+ "@lezer/markdown" "^1.0.0"
+
+"@codemirror/lang-sql@^6.8.0":
+ version "6.9.0"
+ resolved "https://registry.npmjs.org/@codemirror/lang-sql/-/lang-sql-6.9.0.tgz#0130da09c7d827b0aa5f9598f61bca975a5480c7"
+ integrity sha512-xmtpWqKSgum1B1J3Ro6rf7nuPqf2+kJQg5SjrofCAcyCThOe0ihSktSoXfXuhQBnwx1QbmreBbLJM5Jru6zitg==
+ dependencies:
+ "@codemirror/autocomplete" "^6.0.0"
+ "@codemirror/language" "^6.0.0"
+ "@codemirror/state" "^6.0.0"
+ "@lezer/common" "^1.2.0"
+ "@lezer/highlight" "^1.0.0"
+ "@lezer/lr" "^1.0.0"
+
+"@codemirror/lang-yaml@^6.1.2":
+ version "6.1.2"
+ resolved "https://registry.npmjs.org/@codemirror/lang-yaml/-/lang-yaml-6.1.2.tgz#c84280c68fa7af456a355d91183b5e537e9b7038"
+ integrity sha512-dxrfG8w5Ce/QbT7YID7mWZFKhdhsaTNOYjOkSIMt1qmC4VQnXSDSYVHHHn8k6kJUfIhtLo8t1JJgltlxWdsITw==
+ dependencies:
+ "@codemirror/autocomplete" "^6.0.0"
+ "@codemirror/language" "^6.0.0"
+ "@codemirror/state" "^6.0.0"
+ "@lezer/common" "^1.2.0"
+ "@lezer/highlight" "^1.2.0"
+ "@lezer/lr" "^1.0.0"
+ "@lezer/yaml" "^1.0.0"
+
+"@codemirror/language@^6.0.0", "@codemirror/language@^6.10.3", "@codemirror/language@^6.3.0", "@codemirror/language@^6.4.0", "@codemirror/language@^6.6.0":
+ version "6.11.2"
+ resolved "https://registry.npmjs.org/@codemirror/language/-/language-6.11.2.tgz#90d2d094cfbd14263bc5354ebd2445ee4e81bdc3"
+ integrity sha512-p44TsNArL4IVXDTbapUmEkAlvWs2CFQbcfc0ymDsis1kH2wh0gcY96AS29c/vp2d0y2Tquk1EDSaawpzilUiAw==
+ dependencies:
+ "@codemirror/state" "^6.0.0"
+ "@codemirror/view" "^6.23.0"
+ "@lezer/common" "^1.1.0"
+ "@lezer/highlight" "^1.0.0"
+ "@lezer/lr" "^1.0.0"
+ style-mod "^4.0.0"
+
+"@codemirror/legacy-modes@^6.4.2":
+ version "6.5.1"
+ resolved "https://registry.npmjs.org/@codemirror/legacy-modes/-/legacy-modes-6.5.1.tgz#6bd13fac94f67a825e5420017e0d2f3c35d09342"
+ integrity sha512-DJYQQ00N1/KdESpZV7jg9hafof/iBNp9h7TYo1SLMk86TWl9uDsVdho2dzd81K+v4retmK6mdC7WpuOQDytQqw==
+ dependencies:
+ "@codemirror/language" "^6.0.0"
+
+"@codemirror/lint@^6.0.0", "@codemirror/lint@^6.8.4":
+ version "6.8.5"
+ resolved "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.5.tgz#9edaa808e764e28e07665b015951934c8ec3a418"
+ integrity sha512-s3n3KisH7dx3vsoeGMxsbRAgKe4O1vbrnKBClm99PU0fWxmxsx5rR2PfqQgIt+2MMJBHbiJ5rfIdLYfB9NNvsA==
+ dependencies:
+ "@codemirror/state" "^6.0.0"
+ "@codemirror/view" "^6.35.0"
+ crelt "^1.0.5"
+
+"@codemirror/state@^6.0.0", "@codemirror/state@^6.4.0", "@codemirror/state@^6.5.0":
+ version "6.5.2"
+ resolved "https://registry.npmjs.org/@codemirror/state/-/state-6.5.2.tgz#8eca3a64212a83367dc85475b7d78d5c9b7076c6"
+ integrity sha512-FVqsPqtPWKVVL3dPSxy8wEF/ymIEuVzF1PK3VbUgrxXpJUSHQWWZz4JMToquRxnkw+36LTamCZG2iua2Ptq0fA==
+ dependencies:
+ "@marijn/find-cluster-break" "^1.0.0"
+
+"@codemirror/view@^6.0.0", "@codemirror/view@^6.17.0", "@codemirror/view@^6.23.0", "@codemirror/view@^6.27.0", "@codemirror/view@^6.35.0", "@codemirror/view@^6.36.2":
+ version "6.38.0"
+ resolved "https://registry.npmjs.org/@codemirror/view/-/view-6.38.0.tgz#4486062b791a4247793e0953e05ae71a9e172217"
+ integrity sha512-yvSchUwHOdupXkd7xJ0ob36jdsSR/I+/C+VbY0ffBiL5NiSTEBDfB1ZGWbbIlDd5xgdUkody+lukAdOxYrOBeg==
+ dependencies:
+ "@codemirror/state" "^6.5.0"
+ crelt "^1.0.6"
+ style-mod "^4.1.0"
+ w3c-keyname "^2.2.4"
+
"@colors/colors@1.5.0":
version "1.5.0"
resolved "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz#bb504579c1cae923e6576a4f5da43d25f97bdbd9"
integrity sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==
-"@csstools/postcss-sass@^5.0.1":
- version "5.1.1"
- resolved "https://registry.npmjs.org/@csstools/postcss-sass/-/postcss-sass-5.1.1.tgz#135921df13bc56bee50c7470a66e4e9f3d5c89ae"
- integrity sha512-La7bgTcM6YwPBLqlaXg7lMLry82iLv1a+S1RmgvHq2mH2Zd57L2anjZvJC8ACUHWc4M9fXws93dq6gaK0kZyAw==
- dependencies:
- "@csstools/sass-import-resolve" "^1.0.0"
- sass "^1.69.5"
- source-map "~0.7.4"
-
-"@csstools/sass-import-resolve@^1.0.0":
- version "1.0.0"
- resolved "https://registry.npmjs.org/@csstools/sass-import-resolve/-/sass-import-resolve-1.0.0.tgz#32c3cdb2f7af3cd8f0dca357b592e7271f3831b5"
- integrity sha512-pH4KCsbtBLLe7eqUrw8brcuFO8IZlN36JjdKlOublibVdAIPHCzEnpBWOVUXK5sCf+DpBi8ZtuWtjF0srybdeA==
-
"@docfy/core@^0.4.4":
version "0.4.4"
resolved "https://registry.npmjs.org/@docfy/core/-/core-0.4.4.tgz#041157870abcde99e64068cdbd79767b2c1a97b4"
@@ -1377,7 +1602,7 @@
ember-cli-babel "^7.10.0"
ember-modifier-manager-polyfill "^1.1.0"
-"@ember/render-modifiers@^2.0.0", "@ember/render-modifiers@^2.0.5":
+"@ember/render-modifiers@^2.0.0", "@ember/render-modifiers@^2.1.0":
version "2.1.0"
resolved "https://registry.npmjs.org/@ember/render-modifiers/-/render-modifiers-2.1.0.tgz#f4fff95a8b5cfbe947ec46644732d511711c5bf9"
integrity sha512-LruhfoDv2itpk0fA0IC76Sxjcnq/7BC6txpQo40hOko8Dn6OxwQfxkPIbZGV0Cz7df+iX+VJrcYzNIvlc3w2EQ==
@@ -1417,7 +1642,7 @@
ember-cli-version-checker "^5.1.2"
semver "^7.3.5"
-"@embroider/addon-shim@^1.0.0", "@embroider/addon-shim@^1.2.0", "@embroider/addon-shim@^1.8.3", "@embroider/addon-shim@^1.8.4", "@embroider/addon-shim@^1.8.7":
+"@embroider/addon-shim@^1.0.0", "@embroider/addon-shim@^1.2.0", "@embroider/addon-shim@^1.8.3", "@embroider/addon-shim@^1.8.7":
version "1.8.9"
resolved "https://registry.npmjs.org/@embroider/addon-shim/-/addon-shim-1.8.9.tgz#ef37eba069d391b2d2a80aa62880c469051c4d43"
integrity sha512-qyN64T1jMHZ99ihlk7VFHCWHYZHLE1DOdHi0J7lmn5waV1DoW7gD8JLi1i7FregzXtKhbDc7shyEmTmWPTs8MQ==
@@ -1427,6 +1652,16 @@
common-ancestor-path "^1.0.1"
semver "^7.3.8"
+"@embroider/addon-shim@^1.10.0", "@embroider/addon-shim@^1.6.0", "@embroider/addon-shim@^1.8.6", "@embroider/addon-shim@^1.9.0":
+ version "1.10.0"
+ resolved "https://registry.npmjs.org/@embroider/addon-shim/-/addon-shim-1.10.0.tgz#7c3325e0939674290a9ca4ad7d744ee69313c0a0"
+ integrity sha512-gcJuHiXgnrzaU8NyU+2bMbtS6PNOr5v5B8OXBqaBvTCsMpXLvKo8OBOQFCoUN0rPX2J6VaFqrbi/371sMvzZug==
+ dependencies:
+ "@embroider/shared-internals" "^3.0.0"
+ broccoli-funnel "^3.0.8"
+ common-ancestor-path "^1.0.1"
+ semver "^7.3.8"
+
"@embroider/core@0.36.0":
version "0.36.0"
resolved "https://registry.npmjs.org/@embroider/core/-/core-0.36.0.tgz#fbbd60d29c3fcbe02b4e3e63e6043a43de2b9ce3"
@@ -1518,7 +1753,7 @@
resolve "^1.20.0"
semver "^7.3.2"
-"@embroider/macros@^0.50.0 || ^1.0.0", "@embroider/macros@^1.0.0", "@embroider/macros@^1.10.0", "@embroider/macros@^1.16.1", "@embroider/macros@^1.2.0":
+"@embroider/macros@^0.50.0 || ^1.0.0", "@embroider/macros@^1.0.0", "@embroider/macros@^1.10.0", "@embroider/macros@^1.16.1":
version "1.16.5"
resolved "https://registry.npmjs.org/@embroider/macros/-/macros-1.16.5.tgz#871addab2103b554c6b6a3a337c00e3f0a0462ac"
integrity sha512-Oz8bUZvZzOV1Gk3qSgIzZJJzs6acclSTcEFyB+KdKbKqjTC3uebn53aU2gAlLU7/YdTRZrg2gNbQuwAp+tGkGg==
@@ -1532,6 +1767,34 @@
resolve "^1.20.0"
semver "^7.3.2"
+"@embroider/macros@^1.12.3", "@embroider/macros@^1.18.0":
+ version "1.18.0"
+ resolved "https://registry.npmjs.org/@embroider/macros/-/macros-1.18.0.tgz#d79c4474667559ac9baf903e8fb89f1b00a0c45a"
+ integrity sha512-KanP80XxNK4bmQ1HKTcUjy/cdCt9n7knPMLK1vzHdOFymACHo+GbhgUjXjYdOCuBTv+ZwcjL2P2XDmBcYS9r8g==
+ dependencies:
+ "@embroider/shared-internals" "3.0.0"
+ assert-never "^1.2.1"
+ babel-import-util "^3.0.1"
+ ember-cli-babel "^7.26.6"
+ find-up "^5.0.0"
+ lodash "^4.17.21"
+ resolve "^1.20.0"
+ semver "^7.3.2"
+
+"@embroider/macros@~1.16.0":
+ version "1.16.13"
+ resolved "https://registry.npmjs.org/@embroider/macros/-/macros-1.16.13.tgz#3647839de7154400115e0b874bbf5aed9312a7a8"
+ integrity sha512-2oGZh0m1byBYQFWEa8b2cvHJB2LzaF3DdMCLCqcRAccABMROt1G3sultnNCT30NhfdGWMEsJOT3Jm4nFxXmTRw==
+ dependencies:
+ "@embroider/shared-internals" "2.9.0"
+ assert-never "^1.2.1"
+ babel-import-util "^2.0.0"
+ ember-cli-babel "^7.26.6"
+ find-up "^5.0.0"
+ lodash "^4.17.21"
+ resolve "^1.20.0"
+ semver "^7.3.2"
+
"@embroider/shared-internals@0.41.0":
version "0.41.0"
resolved "https://registry.npmjs.org/@embroider/shared-internals/-/shared-internals-0.41.0.tgz#2553f026d4f48ea1fd11235501feb63bf49fa306"
@@ -1587,6 +1850,43 @@
semver "^7.3.5"
typescript-memoize "^1.0.1"
+"@embroider/shared-internals@2.9.0":
+ version "2.9.0"
+ resolved "https://registry.npmjs.org/@embroider/shared-internals/-/shared-internals-2.9.0.tgz#5d945b92e08db163de60d82f7c388e2b7260f0cc"
+ integrity sha512-8untWEvGy6av/oYibqZWMz/yB+LHsKxEOoUZiLvcpFwWj2Sipc0DcXeTJQZQZ++otNkLCWyDrDhOLrOkgjOPSg==
+ dependencies:
+ babel-import-util "^2.0.0"
+ debug "^4.3.2"
+ ember-rfc176-data "^0.3.17"
+ fs-extra "^9.1.0"
+ is-subdir "^1.2.0"
+ js-string-escape "^1.0.1"
+ lodash "^4.17.21"
+ minimatch "^3.0.4"
+ pkg-entry-points "^1.1.0"
+ resolve-package-path "^4.0.1"
+ semver "^7.3.5"
+ typescript-memoize "^1.0.1"
+
+"@embroider/shared-internals@3.0.0", "@embroider/shared-internals@^3.0.0":
+ version "3.0.0"
+ resolved "https://registry.npmjs.org/@embroider/shared-internals/-/shared-internals-3.0.0.tgz#98251e6b99d36d64120361a449569ef5384b3812"
+ integrity sha512-5J5ipUMCAinQS38WW7wedruq5Z4VnHvNo+ZgOduw0PtI9w0CQWx7/HE+98PBDW8jclikeF+aHwF317vc1hwuzg==
+ dependencies:
+ babel-import-util "^3.0.1"
+ debug "^4.3.2"
+ ember-rfc176-data "^0.3.17"
+ fs-extra "^9.1.0"
+ is-subdir "^1.2.0"
+ js-string-escape "^1.0.1"
+ lodash "^4.17.21"
+ minimatch "^3.0.4"
+ pkg-entry-points "^1.1.0"
+ resolve-package-path "^4.0.1"
+ resolve.exports "^2.0.2"
+ semver "^7.3.5"
+ typescript-memoize "^1.0.1"
+
"@embroider/shared-internals@^1.0.0":
version "1.8.3"
resolved "https://registry.npmjs.org/@embroider/shared-internals/-/shared-internals-1.8.3.tgz#52d868dc80016e9fe983552c0e516f437bf9b9f9"
@@ -1610,7 +1910,7 @@
broccoli-funnel "^3.0.5"
ember-cli-babel "^7.23.1"
-"@embroider/util@^0.39.1 || ^0.40.0 || ^0.41.0 || ^1.0.0", "@embroider/util@^1.0.0", "@embroider/util@^1.9.0":
+"@embroider/util@^0.39.1 || ^0.40.0 || ^0.41.0 || ^1.0.0", "@embroider/util@^1.9.0":
version "1.13.1"
resolved "https://registry.npmjs.org/@embroider/util/-/util-1.13.1.tgz#c6d4a569b331cbf805e68e7fa6602f248438bde6"
integrity sha512-MRbs2FPO4doQ31YHIYk+QKChEs7k15aTsMk8QmO4eKiuQq9OT0sr1oasObZyGB8cVVbr29WWRWmsNirxzQtHIg==
@@ -1628,6 +1928,15 @@
broccoli-funnel "^3.0.5"
ember-cli-babel "^7.23.1"
+"@embroider/util@^1.13.2":
+ version "1.13.3"
+ resolved "https://registry.npmjs.org/@embroider/util/-/util-1.13.3.tgz#ac6a12f54097173167a9a1189a49ea6cd5b45755"
+ integrity sha512-fb9S137zZqSI1IeWpGKVJ+WZHsRiIrD9D2A4aVwVH0dZeBKDg6lMaMN2MiXJ/ldUAG3DUFxnClnpiG5m2g3JFA==
+ dependencies:
+ "@embroider/macros" "~1.16.0"
+ broccoli-funnel "^3.0.5"
+ ember-cli-babel "^7.26.11"
+
"@eslint/eslintrc@^0.4.3":
version "0.4.3"
resolved "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.3.tgz#9e42981ef035beb3dd49add17acb96e8ff6f394c"
@@ -1643,6 +1952,26 @@
minimatch "^3.0.4"
strip-json-comments "^3.1.1"
+"@floating-ui/core@^1.7.2":
+ version "1.7.2"
+ resolved "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.2.tgz#3d1c35263950b314b6d5a72c8bfb9e3c1551aefd"
+ integrity sha512-wNB5ooIKHQc+Kui96jE/n69rHFWAVoxn5CAzL1Xdd8FG03cgY3MLO+GF9U3W737fYDSgPWA6MReKhBQBop6Pcw==
+ dependencies:
+ "@floating-ui/utils" "^0.2.10"
+
+"@floating-ui/dom@^1.6.12":
+ version "1.7.2"
+ resolved "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.2.tgz#3540b051cf5ce0d4f4db5fb2507a76e8ea5b4a45"
+ integrity sha512-7cfaOQuCS27HD7DX+6ib2OrnW+b4ZBwDNnCcT0uTyidcmyWb03FnQqJybDBoCnpdxwBSfA94UAYlRCt7mV+TbA==
+ dependencies:
+ "@floating-ui/core" "^1.7.2"
+ "@floating-ui/utils" "^0.2.10"
+
+"@floating-ui/utils@^0.2.10":
+ version "0.2.10"
+ resolved "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.10.tgz#a2a1e3812d14525f725d011a73eceb41fef5bc1c"
+ integrity sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==
+
"@formatjs/ecma402-abstract@1.11.4":
version "1.11.4"
resolved "https://registry.npmjs.org/@formatjs/ecma402-abstract/-/ecma402-abstract-1.11.4.tgz#b962dfc4ae84361f9f08fbce411b4e4340930eda"
@@ -1923,37 +2252,58 @@
faker "^4.1.0"
js-yaml "^3.13.1"
-"@hashicorp/design-system-components@^3.0.2":
- version "3.6.0"
- resolved "https://registry.npmjs.org/@hashicorp/design-system-components/-/design-system-components-3.6.0.tgz#e678123f9d88eef7df2edfdf1666997214fe3273"
- integrity sha512-HV8Wa9fTFCfwCCze7gzg2+U3oSmwNkwYtnrrxln7MXzEKRWl1E2p4BM7ZdyaXu3n092R3wcjy04VwaVSmQqrLw==
- dependencies:
- "@ember/render-modifiers" "^2.0.5"
+"@hashicorp/design-system-components@^4.20.2":
+ version "4.20.2"
+ resolved "https://registry.npmjs.org/@hashicorp/design-system-components/-/design-system-components-4.20.2.tgz#0503dc09493d5647fa835207d766cc1f38faf37d"
+ integrity sha512-0FDaDlvaQQVVXoSoWsExmW1TUgmuJNoCz11JuwaOwin59Vl4ttVLsNvY8DviGJlh6VhV1yYlGJa7X2xhQG+ESQ==
+ dependencies:
+ "@codemirror/commands" "^6.8.0"
+ "@codemirror/lang-go" "^6.0.1"
+ "@codemirror/lang-javascript" "^6.2.2"
+ "@codemirror/lang-json" "^6.0.1"
+ "@codemirror/lang-markdown" "^6.3.2"
+ "@codemirror/lang-sql" "^6.8.0"
+ "@codemirror/lang-yaml" "^6.1.2"
+ "@codemirror/language" "^6.10.3"
+ "@codemirror/legacy-modes" "^6.4.2"
+ "@codemirror/lint" "^6.8.4"
+ "@codemirror/state" "^6.5.0"
+ "@codemirror/view" "^6.36.2"
+ "@ember/render-modifiers" "^2.1.0"
"@ember/string" "^3.1.1"
"@ember/test-waiters" "^3.1.0"
- "@hashicorp/design-system-tokens" "^1.11.0"
- "@hashicorp/ember-flight-icons" "^4.1.0"
- dialog-polyfill "^0.5.6"
- ember-a11y-refocus "^3.0.2"
- ember-auto-import "^2.6.3"
- ember-cli-babel "^8.2.0"
- ember-cli-htmlbars "^6.3.0"
+ "@embroider/addon-shim" "^1.10.0"
+ "@embroider/macros" "^1.18.0"
+ "@embroider/util" "^1.13.2"
+ "@floating-ui/dom" "^1.6.12"
+ "@hashicorp/design-system-tokens" "^2.3.0"
+ "@hashicorp/flight-icons" "^3.11.1"
+ "@lezer/highlight" "^1.2.1"
+ "@nullvoxpopuli/ember-composable-helpers" "^5.2.10"
+ clipboard-polyfill "^4.1.1"
+ codemirror-lang-hcl "^0.0.0-beta.2"
+ decorator-transforms "^2.3.0"
+ ember-a11y-refocus "^4.1.4"
ember-cli-sass "^11.0.1"
- ember-composable-helpers "^5.0.0"
- ember-element-helper "^0.8.5"
- ember-focus-trap "^1.1.0"
- ember-keyboard "^8.2.1"
- ember-stargate "^0.4.3"
- ember-style-modifier "^3.0.1"
- ember-truth-helpers "^3.1.1"
- prismjs "^1.29.0"
- sass "^1.69.5"
+ ember-concurrency "^4.0.4"
+ ember-element-helper "^0.8.6"
+ ember-focus-trap "^1.1.1"
+ ember-get-config "^2.1.1"
+ ember-modifier "^4.2.2"
+ ember-power-select "^8.7.1"
+ ember-stargate "^0.5.0"
+ ember-style-modifier "^4.4.0"
+ ember-truth-helpers "^4.0.3"
+ luxon "^3.4.2"
+ prismjs "^1.30.0"
+ sass "^1.83.0"
+ tabbable "^6.2.0"
tippy.js "^6.3.7"
-"@hashicorp/design-system-tokens@^1.11.0", "@hashicorp/design-system-tokens@^1.9.0":
- version "1.11.0"
- resolved "https://registry.npmjs.org/@hashicorp/design-system-tokens/-/design-system-tokens-1.11.0.tgz#0ae68d06d4297e891ce4ba63465d1f6e742e5554"
- integrity sha512-LPj8IAznpEEhKrrosg3+sW9ss6fVKt8zOC9Ic4Kt5/KZPWLFaP6S8pff5ytvede+cZrAzY3UzZF55u+ev5J9GQ==
+"@hashicorp/design-system-tokens@^2.3.0":
+ version "2.3.0"
+ resolved "https://registry.npmjs.org/@hashicorp/design-system-tokens/-/design-system-tokens-2.3.0.tgz#ea05796cad7e573245db90cd9089e44ac5cae5e1"
+ integrity sha512-T2XhcgUeiGkNqvPu73yittDghEccUpIZc7Fh/g4PG7KEvJwbXItFWTRWoHSGR8T6r6LpOP5E6CC4hSVwGRugRg==
"@hashicorp/ember-cli-api-double@^4.0.0":
version "4.0.0"
@@ -1970,21 +2320,25 @@
pretender "^3.2.0"
recursive-readdir-sync "^1.0.6"
-"@hashicorp/ember-flight-icons@^4.0.1", "@hashicorp/ember-flight-icons@^4.1.0":
- version "4.1.0"
- resolved "https://registry.npmjs.org/@hashicorp/ember-flight-icons/-/ember-flight-icons-4.1.0.tgz#4f73fc6145c94ecd46ef38802722ea2d1fe0e876"
- integrity sha512-X1AL475EPuGu6UkZiS/zqRFgymnIhGfgpY1HwPdavePARmgMr9CcPSwsTeZeV+OXq6yUxMzidijSJUAfEpLb5Q==
+"@hashicorp/ember-flight-icons@4.0.0":
+ version "4.0.0"
+ resolved "https://registry.npmjs.org/@hashicorp/ember-flight-icons/-/ember-flight-icons-4.0.0.tgz#266344c64491be23d7a2e6cef796108eb3208abe"
+ integrity sha512-6uSFNnyqCO4IDLZnybAwvpfLKP81Hkjbx8zD3tT0Ib/YitNFxq3AURnAnnxaMybeuq4pJpA3kav+Bx+8infPZQ==
dependencies:
- "@hashicorp/flight-icons" "^3.0.0"
+ "@hashicorp/flight-icons" "^2.20.0"
ember-auto-import "^2.6.3"
- ember-cli-babel "^8.2.0"
- ember-cli-htmlbars "^6.3.0"
- ember-get-config "^2.1.1"
+ ember-cli-babel "^7.26.11"
+ ember-cli-htmlbars "^6.2.0"
-"@hashicorp/flight-icons@^3.0.0":
- version "3.4.0"
- resolved "https://registry.npmjs.org/@hashicorp/flight-icons/-/flight-icons-3.4.0.tgz#fbd30a9748c36d92934784623e93ce9af48ce957"
- integrity sha512-ddbiKkaXW3LMlXE1MZz0fsaO0rJvpbLQ2Js+Qa1e2yWKQbtSvJluAu9V8mg5jvOlR3HFDskTm8knSxVRd0VjGw==
+"@hashicorp/flight-icons@^2.20.0":
+ version "2.25.0"
+ resolved "https://registry.npmjs.org/@hashicorp/flight-icons/-/flight-icons-2.25.0.tgz#a9f3266525a5824b0c19c8dab22d45a27f1d3d3d"
+ integrity sha512-BFR+xnC7hHgo9QahwFKXUCao4MJLYAnYBb9i924Wz6WAkyNey880nyULedh6J3z/lGx+7VVa7H/xnv4WSFyZyA==
+
+"@hashicorp/flight-icons@^3.11.1":
+ version "3.11.1"
+ resolved "https://registry.npmjs.org/@hashicorp/flight-icons/-/flight-icons-3.11.1.tgz#4c34e9511f8a3fe6d4089da8f539a96cd196359e"
+ integrity sha512-FQOHB2qCzHoG3dm6zidS39D4U0ida/7Sge5EG+KqcebH5jsbJQiMyB/qMc3YQBo5vGBe8XUa+rVW8v4JNpzk1Q==
"@html-next/vertical-collection@^4.0.0":
version "4.0.2"
@@ -2030,7 +2384,7 @@
resolved "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz#e45e384e4b8ec16bce2fd903af78450f6bf7ec98"
integrity sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==
-"@jridgewell/gen-mapping@^0.3.2", "@jridgewell/gen-mapping@^0.3.5":
+"@jridgewell/gen-mapping@^0.3.5":
version "0.3.5"
resolved "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz#dcce6aff74bdf6dad1a95802b69b04a2fcb1fb36"
integrity sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==
@@ -2070,6 +2424,87 @@
"@jridgewell/resolve-uri" "^3.1.0"
"@jridgewell/sourcemap-codec" "^1.4.14"
+"@lezer/common@^1.0.0", "@lezer/common@^1.0.2", "@lezer/common@^1.1.0", "@lezer/common@^1.2.0", "@lezer/common@^1.2.1":
+ version "1.2.3"
+ resolved "https://registry.npmjs.org/@lezer/common/-/common-1.2.3.tgz#138fcddab157d83da557554851017c6c1e5667fd"
+ integrity sha512-w7ojc8ejBqr2REPsWxJjrMFsA/ysDCFICn8zEOR9mrqzOu2amhITYuLD8ag6XZf0CFXDrhKqw7+tW8cX66NaDA==
+
+"@lezer/css@^1.1.0", "@lezer/css@^1.1.7":
+ version "1.2.1"
+ resolved "https://registry.npmjs.org/@lezer/css/-/css-1.2.1.tgz#b35f6d0459e9be4de1cdf4d3132a59efd7cf2ba3"
+ integrity sha512-2F5tOqzKEKbCUNraIXc0f6HKeyKlmMWJnBB0i4XW6dJgssrZO/YlZ2pY5xgyqDleqqhiNJ3dQhbrV2aClZQMvg==
+ dependencies:
+ "@lezer/common" "^1.2.0"
+ "@lezer/highlight" "^1.0.0"
+ "@lezer/lr" "^1.3.0"
+
+"@lezer/go@^1.0.0":
+ version "1.0.1"
+ resolved "https://registry.npmjs.org/@lezer/go/-/go-1.0.1.tgz#3004b54f5e4c9719edcba98653f380baf8c0d1a2"
+ integrity sha512-xToRsYxwsgJNHTgNdStpcvmbVuKxTapV0dM0wey1geMMRc9aggoVyKgzYp41D2/vVOx+Ii4hmE206kvxIXBVXQ==
+ dependencies:
+ "@lezer/common" "^1.2.0"
+ "@lezer/highlight" "^1.0.0"
+ "@lezer/lr" "^1.3.0"
+
+"@lezer/highlight@^1.0.0", "@lezer/highlight@^1.1.3", "@lezer/highlight@^1.2.0", "@lezer/highlight@^1.2.1":
+ version "1.2.1"
+ resolved "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.1.tgz#596fa8f9aeb58a608be0a563e960c373cbf23f8b"
+ integrity sha512-Z5duk4RN/3zuVO7Jq0pGLJ3qynpxUVsh7IbUbGj88+uV2ApSAn6kWg2au3iJb+0Zi7kKtqffIESgNcRXWZWmSA==
+ dependencies:
+ "@lezer/common" "^1.0.0"
+
+"@lezer/html@^1.3.0":
+ version "1.3.10"
+ resolved "https://registry.npmjs.org/@lezer/html/-/html-1.3.10.tgz#1be9a029a6fe835c823b20a98a449a630416b2af"
+ integrity sha512-dqpT8nISx/p9Do3AchvYGV3qYc4/rKr3IBZxlHmpIKam56P47RSHkSF5f13Vu9hebS1jM0HmtJIwLbWz1VIY6w==
+ dependencies:
+ "@lezer/common" "^1.2.0"
+ "@lezer/highlight" "^1.0.0"
+ "@lezer/lr" "^1.0.0"
+
+"@lezer/javascript@^1.0.0":
+ version "1.5.1"
+ resolved "https://registry.npmjs.org/@lezer/javascript/-/javascript-1.5.1.tgz#2a424a6ec29f1d4ef3c34cbccc5447e373618ad8"
+ integrity sha512-ATOImjeVJuvgm3JQ/bpo2Tmv55HSScE2MTPnKRMRIPx2cLhHGyX2VnqpHhtIV1tVzIjZDbcWQm+NCTF40ggZVw==
+ dependencies:
+ "@lezer/common" "^1.2.0"
+ "@lezer/highlight" "^1.1.3"
+ "@lezer/lr" "^1.3.0"
+
+"@lezer/json@^1.0.0":
+ version "1.0.3"
+ resolved "https://registry.npmjs.org/@lezer/json/-/json-1.0.3.tgz#e773a012ad0088fbf07ce49cfba875cc9e5bc05f"
+ integrity sha512-BP9KzdF9Y35PDpv04r0VeSTKDeox5vVr3efE7eBbx3r4s3oNLfunchejZhjArmeieBH+nVOpgIiBJpEAv8ilqQ==
+ dependencies:
+ "@lezer/common" "^1.2.0"
+ "@lezer/highlight" "^1.0.0"
+ "@lezer/lr" "^1.0.0"
+
+"@lezer/lr@^1.0.0", "@lezer/lr@^1.3.0", "@lezer/lr@^1.4.0":
+ version "1.4.2"
+ resolved "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.2.tgz#931ea3dea8e9de84e90781001dae30dea9ff1727"
+ integrity sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA==
+ dependencies:
+ "@lezer/common" "^1.0.0"
+
+"@lezer/markdown@^1.0.0":
+ version "1.4.3"
+ resolved "https://registry.npmjs.org/@lezer/markdown/-/markdown-1.4.3.tgz#a742ed5e782ac4913a621dfd1e6a8e409f4dd589"
+ integrity sha512-kfw+2uMrQ/wy/+ONfrH83OkdFNM0ye5Xq96cLlaCy7h5UT9FO54DU4oRoIc0CSBh5NWmWuiIJA7NGLMJbQ+Oxg==
+ dependencies:
+ "@lezer/common" "^1.0.0"
+ "@lezer/highlight" "^1.0.0"
+
+"@lezer/yaml@^1.0.0":
+ version "1.0.3"
+ resolved "https://registry.npmjs.org/@lezer/yaml/-/yaml-1.0.3.tgz#b23770ab42b390056da6b187d861b998fd60b1ff"
+ integrity sha512-GuBLekbw9jDBDhGur82nuwkxKQ+a3W5H0GfaAthDXcAu+XdpS43VlnxA9E9hllkpSP5ellRDKjLLj7Lu9Wr6xA==
+ dependencies:
+ "@lezer/common" "^1.2.0"
+ "@lezer/highlight" "^1.0.0"
+ "@lezer/lr" "^1.4.0"
+
"@lit-labs/ssr-dom-shim@^1.0.0":
version "1.2.0"
resolved "https://registry.npmjs.org/@lit-labs/ssr-dom-shim/-/ssr-dom-shim-1.2.0.tgz#353ce4a76c83fadec272ea5674ede767650762fd"
@@ -2097,6 +2532,11 @@
dependencies:
call-bind "^1.0.7"
+"@marijn/find-cluster-break@^1.0.0":
+ version "1.0.2"
+ resolved "https://registry.npmjs.org/@marijn/find-cluster-break/-/find-cluster-break-1.0.2.tgz#775374306116d51c0c500b8c4face0f9a04752d8"
+ integrity sha512-l0h88YhZFyKdXIFNfSWpyjStDjGHwZ/U7iobcK1cQQD8sejsONdQtTVU+1wVN1PBw40PiiHB1vA5S7VTfQiP9g==
+
"@mrmlnc/readdir-enhanced@^2.2.1":
version "2.2.1"
resolved "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz#524af240d1a360527b730475ecfa1344aa540dde"
@@ -2131,6 +2571,104 @@
"@nodelib/fs.scandir" "2.1.5"
fastq "^1.6.0"
+"@nullvoxpopuli/ember-composable-helpers@^5.2.10":
+ version "5.2.11"
+ resolved "https://registry.npmjs.org/@nullvoxpopuli/ember-composable-helpers/-/ember-composable-helpers-5.2.11.tgz#ecea309e85efb29bace4a84dc3168d03e8e9fc73"
+ integrity sha512-hdDDhYru0TelepDbh1WpxJlyFYy9bIqdKx3u6Y8FkEjgNnF5RFV7gIUk4u8XB28/3llHAILepCMvRmze9176OA==
+ dependencies:
+ "@embroider/addon-shim" "^1.9.0"
+ decorator-transforms "^2.3.0"
+ ember-functions-as-helper-polyfill "^2.1.2"
+
+"@parcel/watcher-android-arm64@2.5.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.5.1.tgz#507f836d7e2042f798c7d07ad19c3546f9848ac1"
+ integrity sha512-KF8+j9nNbUN8vzOFDpRMsaKBHZ/mcjEjMToVMJOhTozkDonQFFrRcfdLWn6yWKCmJKmdVxSgHiYvTCef4/qcBA==
+
+"@parcel/watcher-darwin-arm64@2.5.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.1.tgz#3d26dce38de6590ef79c47ec2c55793c06ad4f67"
+ integrity sha512-eAzPv5osDmZyBhou8PoF4i6RQXAfeKL9tjb3QzYuccXFMQU0ruIc/POh30ePnaOyD1UXdlKguHBmsTs53tVoPw==
+
+"@parcel/watcher-darwin-x64@2.5.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.5.1.tgz#99f3af3869069ccf774e4ddfccf7e64fd2311ef8"
+ integrity sha512-1ZXDthrnNmwv10A0/3AJNZ9JGlzrF82i3gNQcWOzd7nJ8aj+ILyW1MTxVk35Db0u91oD5Nlk9MBiujMlwmeXZg==
+
+"@parcel/watcher-freebsd-x64@2.5.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.5.1.tgz#14d6857741a9f51dfe51d5b08b7c8afdbc73ad9b"
+ integrity sha512-SI4eljM7Flp9yPuKi8W0ird8TI/JK6CSxju3NojVI6BjHsTyK7zxA9urjVjEKJ5MBYC+bLmMcbAWlZ+rFkLpJQ==
+
+"@parcel/watcher-linux-arm-glibc@2.5.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.5.1.tgz#43c3246d6892381db473bb4f663229ad20b609a1"
+ integrity sha512-RCdZlEyTs8geyBkkcnPWvtXLY44BCeZKmGYRtSgtwwnHR4dxfHRG3gR99XdMEdQ7KeiDdasJwwvNSF5jKtDwdA==
+
+"@parcel/watcher-linux-arm-musl@2.5.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher-linux-arm-musl/-/watcher-linux-arm-musl-2.5.1.tgz#663750f7090bb6278d2210de643eb8a3f780d08e"
+ integrity sha512-6E+m/Mm1t1yhB8X412stiKFG3XykmgdIOqhjWj+VL8oHkKABfu/gjFj8DvLrYVHSBNC+/u5PeNrujiSQ1zwd1Q==
+
+"@parcel/watcher-linux-arm64-glibc@2.5.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.5.1.tgz#ba60e1f56977f7e47cd7e31ad65d15fdcbd07e30"
+ integrity sha512-LrGp+f02yU3BN9A+DGuY3v3bmnFUggAITBGriZHUREfNEzZh/GO06FF5u2kx8x+GBEUYfyTGamol4j3m9ANe8w==
+
+"@parcel/watcher-linux-arm64-musl@2.5.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.5.1.tgz#f7fbcdff2f04c526f96eac01f97419a6a99855d2"
+ integrity sha512-cFOjABi92pMYRXS7AcQv9/M1YuKRw8SZniCDw0ssQb/noPkRzA+HBDkwmyOJYp5wXcsTrhxO0zq1U11cK9jsFg==
+
+"@parcel/watcher-linux-x64-glibc@2.5.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.5.1.tgz#4d2ea0f633eb1917d83d483392ce6181b6a92e4e"
+ integrity sha512-GcESn8NZySmfwlTsIur+49yDqSny2IhPeZfXunQi48DMugKeZ7uy1FX83pO0X22sHntJ4Ub+9k34XQCX+oHt2A==
+
+"@parcel/watcher-linux-x64-musl@2.5.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher-linux-x64-musl/-/watcher-linux-x64-musl-2.5.1.tgz#277b346b05db54f55657301dd77bdf99d63606ee"
+ integrity sha512-n0E2EQbatQ3bXhcH2D1XIAANAcTZkQICBPVaxMeaCVBtOpBZpWJuf7LwyWPSBDITb7In8mqQgJ7gH8CILCURXg==
+
+"@parcel/watcher-win32-arm64@2.5.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.5.1.tgz#7e9e02a26784d47503de1d10e8eab6cceb524243"
+ integrity sha512-RFzklRvmc3PkjKjry3hLF9wD7ppR4AKcWNzH7kXR7GUe0Igb3Nz8fyPwtZCSquGrhU5HhUNDr/mKBqj7tqA2Vw==
+
+"@parcel/watcher-win32-ia32@2.5.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.5.1.tgz#2d0f94fa59a873cdc584bf7f6b1dc628ddf976e6"
+ integrity sha512-c2KkcVN+NJmuA7CGlaGD1qJh1cLfDnQsHjE89E60vUEMlqduHGCdCLJCID5geFVM0dOtA3ZiIO8BoEQmzQVfpQ==
+
+"@parcel/watcher-win32-x64@2.5.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.5.1.tgz#ae52693259664ba6f2228fa61d7ee44b64ea0947"
+ integrity sha512-9lHBdJITeNR++EvSQVUcaZoWupyHfXe1jZvGZ06O/5MflPcuPLtEphScIBL+AiCWBO46tDSHzWyD0uDmmZqsgA==
+
+"@parcel/watcher@^2.4.1":
+ version "2.5.1"
+ resolved "https://registry.npmjs.org/@parcel/watcher/-/watcher-2.5.1.tgz#342507a9cfaaf172479a882309def1e991fb1200"
+ integrity sha512-dfUnCxiN9H4ap84DvD2ubjw+3vUNpstxa0TneY/Paat8a3R4uQZDLSvWjmznAY/DoahqTHl9V46HF/Zs3F29pg==
+ dependencies:
+ detect-libc "^1.0.3"
+ is-glob "^4.0.3"
+ micromatch "^4.0.5"
+ node-addon-api "^7.0.0"
+ optionalDependencies:
+ "@parcel/watcher-android-arm64" "2.5.1"
+ "@parcel/watcher-darwin-arm64" "2.5.1"
+ "@parcel/watcher-darwin-x64" "2.5.1"
+ "@parcel/watcher-freebsd-x64" "2.5.1"
+ "@parcel/watcher-linux-arm-glibc" "2.5.1"
+ "@parcel/watcher-linux-arm-musl" "2.5.1"
+ "@parcel/watcher-linux-arm64-glibc" "2.5.1"
+ "@parcel/watcher-linux-arm64-musl" "2.5.1"
+ "@parcel/watcher-linux-x64-glibc" "2.5.1"
+ "@parcel/watcher-linux-x64-musl" "2.5.1"
+ "@parcel/watcher-win32-arm64" "2.5.1"
+ "@parcel/watcher-win32-ia32" "2.5.1"
+ "@parcel/watcher-win32-x64" "2.5.1"
+
"@popperjs/core@^2.9.0":
version "2.11.8"
resolved "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz#6b79032e760a0899cd4204710beede972a3a185f"
@@ -2245,23 +2783,7 @@
dependencies:
"@types/node" "*"
-"@types/eslint-scope@^3.7.3":
- version "3.7.7"
- resolved "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz#3108bd5f18b0cdb277c867b3dd449c9ed7079ac5"
- integrity sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==
- dependencies:
- "@types/eslint" "*"
- "@types/estree" "*"
-
-"@types/eslint@*":
- version "8.56.10"
- resolved "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.10.tgz#eb2370a73bf04a901eeba8f22595c7ee0f7eb58d"
- integrity sha512-Shavhk87gCtY2fhXDctcfS3e6FdxWkCx1iUZ9eEUbh7rTqlZT0/IzOkCOVt0fCjcFuZ9FPYfuezTBImfHCDBGQ==
- dependencies:
- "@types/estree" "*"
- "@types/json-schema" "*"
-
-"@types/estree@*", "@types/estree@^1.0.5":
+"@types/estree@^1.0.5":
version "1.0.5"
resolved "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz#a6ce3e556e00fd9895dd872dd172ad0d4bd687f4"
integrity sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==
@@ -2328,7 +2850,7 @@
resolved "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.4.tgz#7eb47726c391b7345a6ec35ad7f4de469cf5ba4f"
integrity sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==
-"@types/json-schema@*", "@types/json-schema@^7.0.5", "@types/json-schema@^7.0.8", "@types/json-schema@^7.0.9":
+"@types/json-schema@^7.0.5", "@types/json-schema@^7.0.8", "@types/json-schema@^7.0.9":
version "7.0.15"
resolved "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841"
integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==
@@ -2420,64 +2942,21 @@
"@webassemblyjs/helper-numbers" "1.11.6"
"@webassemblyjs/helper-wasm-bytecode" "1.11.6"
-"@webassemblyjs/ast@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.9.0.tgz#bd850604b4042459a5a41cd7d338cbed695ed964"
- integrity sha512-C6wW5L+b7ogSDVqymbkkvuW9kruN//YisMED04xzeBBqjHa2FYnmvOlS6Xj68xWQRgWvI9cIglsjFowH/RJyEA==
- dependencies:
- "@webassemblyjs/helper-module-context" "1.9.0"
- "@webassemblyjs/helper-wasm-bytecode" "1.9.0"
- "@webassemblyjs/wast-parser" "1.9.0"
-
"@webassemblyjs/floating-point-hex-parser@1.11.6":
version "1.11.6"
resolved "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz#dacbcb95aff135c8260f77fa3b4c5fea600a6431"
integrity sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==
-"@webassemblyjs/floating-point-hex-parser@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.9.0.tgz#3c3d3b271bddfc84deb00f71344438311d52ffb4"
- integrity sha512-TG5qcFsS8QB4g4MhrxK5TqfdNe7Ey/7YL/xN+36rRjl/BlGE/NcBvJcqsRgCP6Z92mRE+7N50pRIi8SmKUbcQA==
-
"@webassemblyjs/helper-api-error@1.11.6":
version "1.11.6"
resolved "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz#6132f68c4acd59dcd141c44b18cbebbd9f2fa768"
integrity sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==
-"@webassemblyjs/helper-api-error@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.9.0.tgz#203f676e333b96c9da2eeab3ccef33c45928b6a2"
- integrity sha512-NcMLjoFMXpsASZFxJ5h2HZRcEhDkvnNFOAKneP5RbKRzaWJN36NC4jqQHKwStIhGXu5mUWlUUk7ygdtrO8lbmw==
-
"@webassemblyjs/helper-buffer@1.12.1":
version "1.12.1"
resolved "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz#6df20d272ea5439bf20ab3492b7fb70e9bfcb3f6"
integrity sha512-nzJwQw99DNDKr9BVCOZcLuJJUlqkJh+kVzVl6Fmq/tI5ZtEyWT1KZMyOXltXLZJmDtvLCDgwsyrkohEtopTXCw==
-"@webassemblyjs/helper-buffer@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.9.0.tgz#a1442d269c5feb23fcbc9ef759dac3547f29de00"
- integrity sha512-qZol43oqhq6yBPx7YM3m9Bv7WMV9Eevj6kMi6InKOuZxhw+q9hOkvq5e/PpKSiLfyetpaBnogSbNCfBwyB00CA==
-
-"@webassemblyjs/helper-code-frame@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.9.0.tgz#647f8892cd2043a82ac0c8c5e75c36f1d9159f27"
- integrity sha512-ERCYdJBkD9Vu4vtjUYe8LZruWuNIToYq/ME22igL+2vj2dQ2OOujIZr3MEFvfEaqKoVqpsFKAGsRdBSBjrIvZA==
- dependencies:
- "@webassemblyjs/wast-printer" "1.9.0"
-
-"@webassemblyjs/helper-fsm@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/helper-fsm/-/helper-fsm-1.9.0.tgz#c05256b71244214671f4b08ec108ad63b70eddb8"
- integrity sha512-OPRowhGbshCb5PxJ8LocpdX9Kl0uB4XsAjl6jH/dWKlk/mzsANvhwbiULsaiqT5GZGT9qinTICdj6PLuM5gslw==
-
-"@webassemblyjs/helper-module-context@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/helper-module-context/-/helper-module-context-1.9.0.tgz#25d8884b76839871a08a6c6f806c3979ef712f07"
- integrity sha512-MJCW8iGC08tMk2enck1aPW+BE5Cw8/7ph/VGZxwyvGbJwjktKkDK7vy7gAmMDx88D7mhDTCNKAW5tED+gZ0W8g==
- dependencies:
- "@webassemblyjs/ast" "1.9.0"
-
"@webassemblyjs/helper-numbers@1.11.6":
version "1.11.6"
resolved "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz#cbce5e7e0c1bd32cf4905ae444ef64cea919f1b5"
@@ -2492,11 +2971,6 @@
resolved "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz#bb2ebdb3b83aa26d9baad4c46d4315283acd51e9"
integrity sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==
-"@webassemblyjs/helper-wasm-bytecode@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.9.0.tgz#4fed8beac9b8c14f8c58b70d124d549dd1fe5790"
- integrity sha512-R7FStIzyNcd7xKxCZH5lE0Bqy+hGTwS3LJjuv1ZVxd9O7eHCedSdrId/hMOd20I+v8wDXEn+bjfKDLzTepoaUw==
-
"@webassemblyjs/helper-wasm-section@1.12.1":
version "1.12.1"
resolved "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz#3da623233ae1a60409b509a52ade9bc22a37f7bf"
@@ -2507,16 +2981,6 @@
"@webassemblyjs/helper-wasm-bytecode" "1.11.6"
"@webassemblyjs/wasm-gen" "1.12.1"
-"@webassemblyjs/helper-wasm-section@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.9.0.tgz#5a4138d5a6292ba18b04c5ae49717e4167965346"
- integrity sha512-XnMB8l3ek4tvrKUUku+IVaXNHz2YsJyOOmz+MMkZvh8h1uSJpSen6vYnw3IoQ7WwEuAhL8Efjms1ZWjqh2agvw==
- dependencies:
- "@webassemblyjs/ast" "1.9.0"
- "@webassemblyjs/helper-buffer" "1.9.0"
- "@webassemblyjs/helper-wasm-bytecode" "1.9.0"
- "@webassemblyjs/wasm-gen" "1.9.0"
-
"@webassemblyjs/ieee754@1.11.6":
version "1.11.6"
resolved "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz#bb665c91d0b14fffceb0e38298c329af043c6e3a"
@@ -2524,13 +2988,6 @@
dependencies:
"@xtuc/ieee754" "^1.2.0"
-"@webassemblyjs/ieee754@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.9.0.tgz#15c7a0fbaae83fb26143bbacf6d6df1702ad39e4"
- integrity sha512-dcX8JuYU/gvymzIHc9DgxTzUUTLexWwt8uCTWP3otys596io0L5aW02Gb1RjYpx2+0Jus1h4ZFqjla7umFniTg==
- dependencies:
- "@xtuc/ieee754" "^1.2.0"
-
"@webassemblyjs/leb128@1.11.6":
version "1.11.6"
resolved "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz#70e60e5e82f9ac81118bc25381a0b283893240d7"
@@ -2538,37 +2995,11 @@
dependencies:
"@xtuc/long" "4.2.2"
-"@webassemblyjs/leb128@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.9.0.tgz#f19ca0b76a6dc55623a09cffa769e838fa1e1c95"
- integrity sha512-ENVzM5VwV1ojs9jam6vPys97B/S65YQtv/aanqnU7D8aSoHFX8GyhGg0CMfyKNIHBuAVjy3tlzd5QMMINa7wpw==
- dependencies:
- "@xtuc/long" "4.2.2"
-
"@webassemblyjs/utf8@1.11.6":
version "1.11.6"
resolved "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz#90f8bc34c561595fe156603be7253cdbcd0fab5a"
integrity sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==
-"@webassemblyjs/utf8@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.9.0.tgz#04d33b636f78e6a6813227e82402f7637b6229ab"
- integrity sha512-GZbQlWtopBTP0u7cHrEx+73yZKrQoBMpwkGEIqlacljhXCkVM1kMQge/Mf+csMJAjEdSwhOyLAS0AoR3AG5P8w==
-
-"@webassemblyjs/wasm-edit@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.9.0.tgz#3fe6d79d3f0f922183aa86002c42dd256cfee9cf"
- integrity sha512-FgHzBm80uwz5M8WKnMTn6j/sVbqilPdQXTWraSjBwFXSYGirpkSWE2R9Qvz9tNiTKQvoKILpCuTjBKzOIm0nxw==
- dependencies:
- "@webassemblyjs/ast" "1.9.0"
- "@webassemblyjs/helper-buffer" "1.9.0"
- "@webassemblyjs/helper-wasm-bytecode" "1.9.0"
- "@webassemblyjs/helper-wasm-section" "1.9.0"
- "@webassemblyjs/wasm-gen" "1.9.0"
- "@webassemblyjs/wasm-opt" "1.9.0"
- "@webassemblyjs/wasm-parser" "1.9.0"
- "@webassemblyjs/wast-printer" "1.9.0"
-
"@webassemblyjs/wasm-edit@^1.12.1":
version "1.12.1"
resolved "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.12.1.tgz#9f9f3ff52a14c980939be0ef9d5df9ebc678ae3b"
@@ -2594,17 +3025,6 @@
"@webassemblyjs/leb128" "1.11.6"
"@webassemblyjs/utf8" "1.11.6"
-"@webassemblyjs/wasm-gen@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.9.0.tgz#50bc70ec68ded8e2763b01a1418bf43491a7a49c"
- integrity sha512-cPE3o44YzOOHvlsb4+E9qSqjc9Qf9Na1OO/BHFy4OI91XDE14MjFN4lTMezzaIWdPqHnsTodGGNP+iRSYfGkjA==
- dependencies:
- "@webassemblyjs/ast" "1.9.0"
- "@webassemblyjs/helper-wasm-bytecode" "1.9.0"
- "@webassemblyjs/ieee754" "1.9.0"
- "@webassemblyjs/leb128" "1.9.0"
- "@webassemblyjs/utf8" "1.9.0"
-
"@webassemblyjs/wasm-opt@1.12.1":
version "1.12.1"
resolved "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.12.1.tgz#9e6e81475dfcfb62dab574ac2dda38226c232bc5"
@@ -2615,16 +3035,6 @@
"@webassemblyjs/wasm-gen" "1.12.1"
"@webassemblyjs/wasm-parser" "1.12.1"
-"@webassemblyjs/wasm-opt@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.9.0.tgz#2211181e5b31326443cc8112eb9f0b9028721a61"
- integrity sha512-Qkjgm6Anhm+OMbIL0iokO7meajkzQD71ioelnfPEj6r4eOFuqm4YC3VBPqXjFyyNwowzbMD+hizmprP/Fwkl2A==
- dependencies:
- "@webassemblyjs/ast" "1.9.0"
- "@webassemblyjs/helper-buffer" "1.9.0"
- "@webassemblyjs/wasm-gen" "1.9.0"
- "@webassemblyjs/wasm-parser" "1.9.0"
-
"@webassemblyjs/wasm-parser@1.12.1", "@webassemblyjs/wasm-parser@^1.12.1":
version "1.12.1"
resolved "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.12.1.tgz#c47acb90e6f083391e3fa61d113650eea1e95937"
@@ -2637,30 +3047,6 @@
"@webassemblyjs/leb128" "1.11.6"
"@webassemblyjs/utf8" "1.11.6"
-"@webassemblyjs/wasm-parser@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.9.0.tgz#9d48e44826df4a6598294aa6c87469d642fff65e"
- integrity sha512-9+wkMowR2AmdSWQzsPEjFU7njh8HTO5MqO8vjwEHuM+AMHioNqSBONRdr0NQQ3dVQrzp0s8lTcYqzUdb7YgELA==
- dependencies:
- "@webassemblyjs/ast" "1.9.0"
- "@webassemblyjs/helper-api-error" "1.9.0"
- "@webassemblyjs/helper-wasm-bytecode" "1.9.0"
- "@webassemblyjs/ieee754" "1.9.0"
- "@webassemblyjs/leb128" "1.9.0"
- "@webassemblyjs/utf8" "1.9.0"
-
-"@webassemblyjs/wast-parser@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/wast-parser/-/wast-parser-1.9.0.tgz#3031115d79ac5bd261556cecc3fa90a3ef451914"
- integrity sha512-qsqSAP3QQ3LyZjNC/0jBJ/ToSxfYJ8kYyuiGvtn/8MK89VrNEfwj7BPQzJVHi0jGTRK2dGdJ5PRqhtjzoww+bw==
- dependencies:
- "@webassemblyjs/ast" "1.9.0"
- "@webassemblyjs/floating-point-hex-parser" "1.9.0"
- "@webassemblyjs/helper-api-error" "1.9.0"
- "@webassemblyjs/helper-code-frame" "1.9.0"
- "@webassemblyjs/helper-fsm" "1.9.0"
- "@xtuc/long" "4.2.2"
-
"@webassemblyjs/wast-printer@1.12.1":
version "1.12.1"
resolved "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.12.1.tgz#bcecf661d7d1abdaf989d8341a4833e33e2b31ac"
@@ -2669,15 +3055,6 @@
"@webassemblyjs/ast" "1.12.1"
"@xtuc/long" "4.2.2"
-"@webassemblyjs/wast-printer@1.9.0":
- version "1.9.0"
- resolved "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.9.0.tgz#4935d54c85fef637b00ce9f52377451d00d47899"
- integrity sha512-2J0nE95rHXHyQ24cWjMKJ1tqB/ds8z/cyeOZxJhcb+rW+SQASVjuznUSmdz5GpVJTzU8JkhYut0D3siFDD6wsA==
- dependencies:
- "@webassemblyjs/ast" "1.9.0"
- "@webassemblyjs/wast-parser" "1.9.0"
- "@xtuc/long" "4.2.2"
-
"@xmldom/xmldom@^0.8.0":
version "0.8.10"
resolved "https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.8.10.tgz#a1337ca426aa61cef9fe15b5b28e340a72f6fa99"
@@ -2746,11 +3123,6 @@ acorn-walk@^7.1.1:
resolved "https://registry.npmjs.org/acorn-walk/-/acorn-walk-7.2.0.tgz#0de889a601203909b0fbe07b8938dc21d2e967bc"
integrity sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA==
-acorn@^6.4.1:
- version "6.4.2"
- resolved "https://registry.npmjs.org/acorn/-/acorn-6.4.2.tgz#35866fd710528e92de10cf06016498e47e39e1e6"
- integrity sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ==
-
acorn@^7.1.1, acorn@^7.4.0:
version "7.4.1"
resolved "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz#feaed255973d2e77555b83dbc08851a6c63520fa"
@@ -2776,11 +3148,6 @@ aggregate-error@^3.0.0:
clean-stack "^2.0.0"
indent-string "^4.0.0"
-ajv-errors@^1.0.0:
- version "1.0.1"
- resolved "https://registry.npmjs.org/ajv-errors/-/ajv-errors-1.0.1.tgz#f35986aceb91afadec4102fbd85014950cefa64d"
- integrity sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==
-
ajv-formats@^2.1.1:
version "2.1.1"
resolved "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz#6e669400659eb74973bbf2e33327180a0996b520"
@@ -2788,7 +3155,7 @@ ajv-formats@^2.1.1:
dependencies:
ajv "^8.0.0"
-ajv-keywords@^3.1.0, ajv-keywords@^3.4.1, ajv-keywords@^3.5.2:
+ajv-keywords@^3.5.2:
version "3.5.2"
resolved "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz#31f29da5ab6e00d1c2d329acf7b5929614d5014d"
integrity sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==
@@ -2800,7 +3167,7 @@ ajv-keywords@^5.1.0:
dependencies:
fast-deep-equal "^3.1.3"
-ajv@^6.1.0, ajv@^6.10.0, ajv@^6.10.2, ajv@^6.12.4, ajv@^6.12.5:
+ajv@^6.10.0, ajv@^6.12.4, ajv@^6.12.5:
version "6.12.6"
resolved "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4"
integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==
@@ -2920,11 +3287,6 @@ ansicolors@~0.2.1:
resolved "https://registry.npmjs.org/ansicolors/-/ansicolors-0.2.1.tgz#be089599097b74a5c9c4a84a0cdbcdb62bd87aef"
integrity sha512-tOIuy1/SK/dr94ZA0ckDohKXNeBNqZ4us6PjMVLs5h1w2GBB6uPtOknp2+VF4F/zcy9LI70W+Z+pE2Soajky1w==
-any-promise@^1.0.0:
- version "1.3.0"
- resolved "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz#abc6afeedcea52e809cdc0376aed3ce39635d17f"
- integrity sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==
-
anymatch@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz#bcb24b4f37934d9aa7ac17b4adaf89e7c76ef2eb"
@@ -2933,14 +3295,6 @@ anymatch@^2.0.0:
micromatch "^3.1.4"
normalize-path "^2.1.1"
-anymatch@~3.1.2:
- version "3.1.3"
- resolved "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz#790c58b19ba1720a84205b57c618d5ad8524973e"
- integrity sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==
- dependencies:
- normalize-path "^3.0.0"
- picomatch "^2.0.4"
-
aot-test-generators@^0.1.0:
version "0.1.0"
resolved "https://registry.npmjs.org/aot-test-generators/-/aot-test-generators-0.1.0.tgz#43f0f615f97cb298d7919c1b0b4e6b7310b03cd0"
@@ -2948,7 +3302,7 @@ aot-test-generators@^0.1.0:
dependencies:
jsesc "^2.5.0"
-aproba@^1.0.3, aproba@^1.1.1:
+aproba@^1.0.3:
version "1.2.0"
resolved "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a"
integrity sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==
@@ -2974,11 +3328,6 @@ are-we-there-yet@~1.1.2:
delegates "^1.0.0"
readable-stream "^2.0.6"
-arg@^5.0.2:
- version "5.0.2"
- resolved "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz#c81433cc427c92c4dcf4865142dbca6f15acd59c"
- integrity sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==
-
argparse@^1.0.7:
version "1.0.10"
resolved "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911"
@@ -2991,16 +3340,6 @@ argparse@^2.0.1:
resolved "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38"
integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==
-arr-diff@^4.0.0:
- version "4.0.0"
- resolved "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz#d6461074febfec71e7e15235761a329a5dc7c520"
- integrity sha512-YVIQ82gZPGBebQV/a8dar4AitzCQs0jjXwMPZllpXMaGjXPYVUawSxQrRsjhjupyVxEvbHgUmIhKVlND+j02kA==
-
-arr-union@^3.1.0:
- version "3.1.0"
- resolved "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz#e39b09aea9def866a8f206e288af63919bae39c4"
- integrity sha512-sKpyeERZ02v1FeCZT8lrfJq5u6goHCtpTAzPwJYe7c8SPFOboNjNg1vz2L4VTn9T4PQxEx13TbXLmYUcS6Ug7Q==
-
array-buffer-byte-length@^1.0.0, array-buffer-byte-length@^1.0.1:
version "1.0.1"
resolved "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz#1e5583ec16763540a27ae52eed99ff899223568f"
@@ -3046,11 +3385,6 @@ array-union@^2.1.0:
resolved "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d"
integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==
-array-unique@^0.3.2:
- version "0.3.2"
- resolved "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz#a894b75d4bc4f6cd679ef3244a9fd8f46ae2d428"
- integrity sha512-SleRWjh9JUud2wH1hPs9rZBZ33H6T9HOiL0uwGnGx9FpE6wKGyfWugmbkEOIs6qWrZhg0LWeLziLrEwQJhs5mQ==
-
array.prototype.every@^1.1.6:
version "1.1.6"
resolved "https://registry.npmjs.org/array.prototype.every/-/array.prototype.every-1.1.6.tgz#1717b407d019913250317300d814a1b6660f10d7"
@@ -3081,33 +3415,11 @@ asap@^2.0.0:
resolved "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46"
integrity sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==
-asn1.js@^4.10.1:
- version "4.10.1"
- resolved "https://registry.npmjs.org/asn1.js/-/asn1.js-4.10.1.tgz#b9c2bf5805f1e64aadeed6df3a2bfafb5a73f5a0"
- integrity sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==
- dependencies:
- bn.js "^4.0.0"
- inherits "^2.0.1"
- minimalistic-assert "^1.0.0"
-
assert-never@^1.1.0, assert-never@^1.2.1:
version "1.3.0"
resolved "https://registry.npmjs.org/assert-never/-/assert-never-1.3.0.tgz#c53cf3ad8fcdb67f400a941dea66dac7fe82dd2e"
integrity sha512-9Z3vxQ+berkL/JJo0dK+EY3Lp0s3NtSnP3VCLsh5HDcZPrh0M+KQRK5sWhUeyPPH+/RCxZqOxLMR+YC6vlviEQ==
-assert@^1.1.1:
- version "1.5.1"
- resolved "https://registry.npmjs.org/assert/-/assert-1.5.1.tgz#038ab248e4ff078e7bc2485ba6e6388466c78f76"
- integrity sha512-zzw1uCAgLbsKwBfFc8CX78DDg+xZeBksSO3vwVIDDN5i94eOrPsSSyiVhmsSABFDM/OcpE2aagCat9dnWQLG1A==
- dependencies:
- object.assign "^4.1.4"
- util "^0.10.4"
-
-assign-symbols@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz#59667f41fadd4f20ccbc2bb96b8d4f7f78ec0367"
- integrity sha512-Q+JC7Whu8HhmTdBph/Tq59IoRtoy6KAm5zzPv00WdujX82lbAL8K7WVjne7vdCsAmbF4AYaDOPyO3k0kl8qIrw==
-
ast-types@0.13.3:
version "0.13.3"
resolved "https://registry.npmjs.org/ast-types/-/ast-types-0.13.3.tgz#50da3f28d17bdbc7969a3a2d83a0e4a72ae755a7"
@@ -3144,11 +3456,6 @@ async-disk-cache@^2.0.0:
rsvp "^4.8.5"
username-sync "^1.0.2"
-async-each@^1.0.1:
- version "1.0.6"
- resolved "https://registry.npmjs.org/async-each/-/async-each-1.0.6.tgz#52f1d9403818c179b7561e11a5d1b77eb2160e77"
- integrity sha512-c646jH1avxr+aVpndVMeAfYw7wAa6idufrlN3LPA4PmKS0QEGp6PIC9nwz0WQkkvBGAMEki3pFdtxaF39J9vvg==
-
async-promise-queue@^1.0.3, async-promise-queue@^1.0.5:
version "1.0.5"
resolved "https://registry.npmjs.org/async-promise-queue/-/async-promise-queue-1.0.5.tgz#cb23bce9fce903a133946a700cc85f27f09ea49d"
@@ -3386,7 +3693,7 @@ babel-import-util@^1.1.0:
resolved "https://registry.npmjs.org/babel-import-util/-/babel-import-util-1.4.1.tgz#1df6fd679845df45494bac9ca12461d49497fdd4"
integrity sha512-TNdiTQdPhXlx02pzG//UyVPSKE7SNWjY0n4So/ZnjQpWwaM5LvWBLkWa1JKll5u06HNscHD91XZPuwrMg1kadQ==
-babel-import-util@^2.0.0:
+babel-import-util@^2.0.0, babel-import-util@^2.0.1:
version "2.1.1"
resolved "https://registry.npmjs.org/babel-import-util/-/babel-import-util-2.1.1.tgz#0f4905fe899abfb8cd835dd52f3df1966d1ffbb0"
integrity sha512-3qBQWRjzP9NreSH/YrOEU1Lj5F60+pWSLP0kIdCWxjFHH7pX2YPHIxQ67el4gnMNfYoDxSDGcT0zpVlZ+gVtQA==
@@ -3396,6 +3703,11 @@ babel-import-util@^3.0.0:
resolved "https://registry.npmjs.org/babel-import-util/-/babel-import-util-3.0.0.tgz#5814c6a58e7b80e64156b48fdfd34d48e6e0b1df"
integrity sha512-4YNPkuVsxAW5lnSTa6cn4Wk49RX6GAB6vX+M6LqEtN0YePqoFczv1/x0EyLK/o+4E1j9jEuYj5Su7IEPab5JHQ==
+babel-import-util@^3.0.1:
+ version "3.0.1"
+ resolved "https://registry.npmjs.org/babel-import-util/-/babel-import-util-3.0.1.tgz#62dd0476e855bf57522e1d0027916dc0c0b0fdb2"
+ integrity sha512-2copPaWQFUrzooJVIVZA/Oppx/S/KOoZ4Uhr+XWEQDMZ8Rvq/0SNQpbdIyMBJ8IELWt10dewuJw+tX4XjOo7Rg==
+
babel-loader@^8.0.6, babel-loader@^8.1.0:
version "8.3.0"
resolved "https://registry.npmjs.org/babel-loader/-/babel-loader-8.3.0.tgz#124936e841ba4fe8176786d6ff28add1f134d6a8"
@@ -3520,17 +3832,6 @@ babel-plugin-module-resolver@^4.1.0:
reselect "^4.0.0"
resolve "^1.13.1"
-babel-plugin-module-resolver@^5.0.0:
- version "5.0.2"
- resolved "https://registry.npmjs.org/babel-plugin-module-resolver/-/babel-plugin-module-resolver-5.0.2.tgz#cdeac5d4aaa3b08dd1ac23ddbf516660ed2d293e"
- integrity sha512-9KtaCazHee2xc0ibfqsDeamwDps6FZNo5S0Q81dUqEuFzVwPhcT4J5jOqIVvgCA3Q/wO9hKYxN/Ds3tIsp5ygg==
- dependencies:
- find-babel-config "^2.1.1"
- glob "^9.3.3"
- pkg-up "^3.1.0"
- reselect "^4.1.7"
- resolve "^1.22.8"
-
babel-plugin-polyfill-corejs2@^0.4.10:
version "0.4.11"
resolved "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.11.tgz#30320dfe3ffe1a336c15afdcdafd6fd615b25e33"
@@ -3937,7 +4238,7 @@ balanced-match@^1.0.0:
resolved "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
-base64-js@^1.0.2, base64-js@^1.3.0, base64-js@^1.3.1:
+base64-js@^1.3.0, base64-js@^1.3.1:
version "1.5.1"
resolved "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a"
integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==
@@ -3947,19 +4248,6 @@ base64id@2.0.0, base64id@~2.0.0:
resolved "https://registry.npmjs.org/base64id/-/base64id-2.0.0.tgz#2770ac6bc47d312af97a8bf9a634342e0cd25cb6"
integrity sha512-lGe34o6EHj9y3Kts9R4ZYs/Gr+6N7MCaMlIFA3F1R2O5/m7K06AxfSeO5530PEERE6/WyEg3lsuyw4GHlPZHog==
-base@^0.11.1:
- version "0.11.2"
- resolved "https://registry.npmjs.org/base/-/base-0.11.2.tgz#7bde5ced145b6d551a90db87f83c558b4eb48a8f"
- integrity sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==
- dependencies:
- cache-base "^1.0.1"
- class-utils "^0.3.5"
- component-emitter "^1.2.1"
- define-property "^1.0.0"
- isobject "^3.0.1"
- mixin-deep "^1.2.0"
- pascalcase "^0.1.1"
-
basic-auth@~2.0.1:
version "2.0.1"
resolved "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz#b998279bf47ce38344b4f3cf916d4679bbf51e3a"
@@ -3967,33 +4255,23 @@ basic-auth@~2.0.1:
dependencies:
safe-buffer "5.1.2"
+better-path-resolve@1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/better-path-resolve/-/better-path-resolve-1.0.0.tgz#13a35a1104cdd48a7b74bf8758f96a1ee613f99d"
+ integrity sha512-pbnl5XzGBdrFU/wT4jqmJVPn2B6UHPBOhzMQkY/SPUPB6QtUXtmBHBIwCbXJol93mOpGMnQyP/+BB19q04xj7g==
+ dependencies:
+ is-windows "^1.0.0"
+
big.js@^5.2.2:
version "5.2.2"
resolved "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz#65f0af382f578bcdc742bd9c281e9cb2d7768328"
integrity sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==
-binary-extensions@^1.0.0:
- version "1.13.1"
- resolved "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz#598afe54755b2868a5330d2aff9d4ebb53209b65"
- integrity sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==
-
-binary-extensions@^2.0.0:
- version "2.3.0"
- resolved "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz#f6e14a97858d327252200242d4ccfe522c445522"
- integrity sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==
-
"binaryextensions@1 || 2", binaryextensions@^2.1.2:
version "2.3.0"
resolved "https://registry.npmjs.org/binaryextensions/-/binaryextensions-2.3.0.tgz#1d269cbf7e6243ea886aa41453c3651ccbe13c22"
integrity sha512-nAihlQsYGyc5Bwq6+EsubvANYGExeJKHDO3RjnvwU042fawQTQfM3Kxn7IHUXQOz4bzfwsGYYHGSvXyW4zOGLg==
-bindings@^1.5.0:
- version "1.5.0"
- resolved "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz#10353c9e945334bc0511a6d90b38fbc7c9c504df"
- integrity sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==
- dependencies:
- file-uri-to-path "1.0.0"
-
bl@^4.1.0:
version "4.1.0"
resolved "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz#451535264182bec2fbbc83a62ab98cf11d9f7b3a"
@@ -4008,7 +4286,7 @@ blank-object@^1.0.1:
resolved "https://registry.npmjs.org/blank-object/-/blank-object-1.0.2.tgz#f990793fbe9a8c8dd013fb3219420bec81d5f4b9"
integrity sha512-kXQ19Xhoghiyw66CUiGypnuRpWlbHAzY/+NyvqTEdTfhfQGH1/dbEMYiXju7fYKIFePpzp/y9dsu5Cu/PkmawQ==
-bluebird@^3.4.6, bluebird@^3.5.5, bluebird@^3.7.2:
+bluebird@^3.4.6, bluebird@^3.7.2:
version "3.7.2"
resolved "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f"
integrity sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==
@@ -4018,20 +4296,15 @@ blueimp-md5@^2.10.0:
resolved "https://registry.npmjs.org/blueimp-md5/-/blueimp-md5-2.19.0.tgz#b53feea5498dcb53dc6ec4b823adb84b729c4af0"
integrity sha512-DRQrD6gJyy8FbiE4s+bDoXS9hiW3Vbx5uCdwvcCf3zLHL+Iv7LtGHLpr+GZV8rHG8tK766FGYBwRbu8pELTt+w==
-bn.js@^4.0.0, bn.js@^4.1.0, bn.js@^4.11.9:
+bn.js@^4.11.9:
version "4.12.0"
resolved "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz#775b3f278efbb9718eec7361f483fb36fbbfea88"
integrity sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==
-bn.js@^5.0.0, bn.js@^5.2.1:
- version "5.2.1"
- resolved "https://registry.npmjs.org/bn.js/-/bn.js-5.2.1.tgz#0bc527a6a0d18d0aa8d5b0538ce4a77dccfa7b70"
- integrity sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==
-
-body-parser@1.20.2, body-parser@^1.19.0:
- version "1.20.2"
- resolved "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz#6feb0e21c4724d06de7ff38da36dad4f57a747fd"
- integrity sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==
+body-parser@1.20.2, body-parser@1.20.3, body-parser@^1.19.0:
+ version "1.20.3"
+ resolved "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz#1953431221c6fb5cd63c4b36d53fab0928e548c6"
+ integrity sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==
dependencies:
bytes "3.1.2"
content-type "~1.0.5"
@@ -4041,7 +4314,7 @@ body-parser@1.20.2, body-parser@^1.19.0:
http-errors "2.0.0"
iconv-lite "0.4.24"
on-finished "2.4.1"
- qs "6.11.0"
+ qs "6.13.0"
raw-body "2.5.2"
type-is "~1.6.18"
unpipe "1.0.0"
@@ -4081,7 +4354,7 @@ brace-expansion@^1.1.7:
balanced-match "^1.0.0"
concat-map "0.0.1"
-braces@^2.3.1, braces@^2.3.2, braces@^3.0.0, braces@^3.0.3, braces@~3.0.2:
+braces@^3.0.0, braces@^3.0.3:
version "3.0.3"
resolved "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789"
integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==
@@ -4149,20 +4422,6 @@ broccoli-babel-transpiler@^7.8.0:
rsvp "^4.8.4"
workerpool "^3.1.1"
-broccoli-babel-transpiler@^8.0.0:
- version "8.0.0"
- resolved "https://registry.npmjs.org/broccoli-babel-transpiler/-/broccoli-babel-transpiler-8.0.0.tgz#07576728a95b840a99d5f0f9b07b71a737f69319"
- integrity sha512-3HEp3flvasUKJGWERcrPgM1SWvHJ0O/fmbEtY9L4kDyMSnqjY6hTYvNvgWCIgbwXAYAUlZP0vjAQsmyLNGLwFw==
- dependencies:
- broccoli-persistent-filter "^3.0.0"
- clone "^2.1.2"
- hash-for-dep "^1.4.7"
- heimdalljs "^0.2.1"
- heimdalljs-logger "^0.1.9"
- json-stable-stringify "^1.0.1"
- rsvp "^4.8.4"
- workerpool "^6.0.2"
-
broccoli-bridge@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/broccoli-bridge/-/broccoli-bridge-1.0.0.tgz#6223fd64b62062c31333539f0f3c42d0acd92fb1"
@@ -4359,7 +4618,7 @@ broccoli-funnel@^2.0.0, broccoli-funnel@^2.0.1, broccoli-funnel@^2.0.2:
symlink-or-copy "^1.0.0"
walk-sync "^0.3.1"
-broccoli-funnel@^3.0.0, broccoli-funnel@^3.0.2, broccoli-funnel@^3.0.3, broccoli-funnel@^3.0.5, broccoli-funnel@^3.0.8:
+broccoli-funnel@^3.0.2, broccoli-funnel@^3.0.3, broccoli-funnel@^3.0.5, broccoli-funnel@^3.0.8:
version "3.0.8"
resolved "https://registry.npmjs.org/broccoli-funnel/-/broccoli-funnel-3.0.8.tgz#f5b62e2763c3918026a15a3c833edc889971279b"
integrity sha512-ng4eIhPYiXqMw6SyGoxPHR3YAwEd2lr9FgBI1CyTbspl4txZovOsmzFkMkGAlu88xyvYXJqHiM2crfLa65T1BQ==
@@ -4516,7 +4775,7 @@ broccoli-persistent-filter@^2.1.0, broccoli-persistent-filter@^2.2.1, broccoli-p
sync-disk-cache "^1.3.3"
walk-sync "^1.0.0"
-broccoli-persistent-filter@^3.0.0, broccoli-persistent-filter@^3.1.1, broccoli-persistent-filter@^3.1.2:
+broccoli-persistent-filter@^3.1.2:
version "3.1.3"
resolved "https://registry.npmjs.org/broccoli-persistent-filter/-/broccoli-persistent-filter-3.1.3.tgz#aca815bf3e3b0247bd0a7b567fdb0d0e08c99cc2"
integrity sha512-Q+8iezprZzL9voaBsDY3rQVl7c7H5h+bvv8SpzCZXPZgfBFCbx7KFQ2c3rZR6lW5k4Kwoqt7jG+rZMUg67Gwxw==
@@ -4589,29 +4848,6 @@ broccoli-plugin@^3.1.0:
rimraf "^2.3.4"
symlink-or-copy "^1.1.8"
-broccoli-postcss-single@^5.0.1:
- version "5.0.2"
- resolved "https://registry.npmjs.org/broccoli-postcss-single/-/broccoli-postcss-single-5.0.2.tgz#f23661b3011494d8a2dbd8ff39eb394e80313682"
- integrity sha512-r4eWtz/5uihtHwOszViWwV6weJr9VryvaqtVo1DOh4gL+TbTyU+NX+Y+t9TqUw99OtuivMz4uHLLH7zZECbZmw==
- dependencies:
- broccoli-caching-writer "^3.0.3"
- include-path-searcher "^0.1.0"
- minimist ">=1.2.5"
- mkdirp "^1.0.3"
- object-assign "^4.1.1"
- postcss "^8.1.4"
-
-broccoli-postcss@^6.0.1:
- version "6.1.0"
- resolved "https://registry.npmjs.org/broccoli-postcss/-/broccoli-postcss-6.1.0.tgz#1e15c5e8a65a984544224f083cbd1e6763691b60"
- integrity sha512-I8+DHq5xcCBHU0PpCtDMayAmSUVx07CqAquUpdlNUHckXeD//cUFf4aFQllnZBhF8Z86YLhuA+j7qvCYYgBXRg==
- dependencies:
- broccoli-funnel "^3.0.0"
- broccoli-persistent-filter "^3.1.1"
- minimist ">=1.2.5"
- object-assign "^4.1.1"
- postcss "^8.1.4"
-
broccoli-rollup@^5.0.0:
version "5.0.0"
resolved "https://registry.npmjs.org/broccoli-rollup/-/broccoli-rollup-5.0.0.tgz#a77b53bcef1b70e988913fee82265c0a4ca530da"
@@ -4653,7 +4889,7 @@ broccoli-source@^2.1.2:
resolved "https://registry.npmjs.org/broccoli-source/-/broccoli-source-2.1.2.tgz#e9ae834f143b607e9ec114ade66731500c38b90b"
integrity sha512-1lLayO4wfS0c0Sj50VfHJXNWf94FYY0WUhxj0R77thbs6uWI7USiOWFqQV5dRmhAJnoKaGN4WyLGQbgjgiYFwQ==
-broccoli-source@^3.0.0, broccoli-source@^3.0.1:
+broccoli-source@^3.0.0:
version "3.0.1"
resolved "https://registry.npmjs.org/broccoli-source/-/broccoli-source-3.0.1.tgz#fd581b2f3877ca1338f724f6ef70acec8c7e1444"
integrity sha512-ZbGVQjivWi0k220fEeIUioN6Y68xjMy0xiLAc0LdieHI99gw+tafU8w0CggBDYVNsJMKUr006AZaM7gNEwCxEg==
@@ -4737,7 +4973,7 @@ broccoli@^3.5.1:
underscore.string "^3.2.2"
watch-detector "^1.0.0"
-brorand@^1.0.1, brorand@^1.1.0:
+brorand@^1.1.0:
version "1.1.0"
resolved "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f"
integrity sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w==
@@ -4747,68 +4983,6 @@ browser-process-hrtime@^1.0.0:
resolved "https://registry.npmjs.org/browser-process-hrtime/-/browser-process-hrtime-1.0.0.tgz#3c9b4b7d782c8121e56f10106d84c0d0ffc94626"
integrity sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow==
-browserify-aes@^1.0.4, browserify-aes@^1.2.0:
- version "1.2.0"
- resolved "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz#326734642f403dabc3003209853bb70ad428ef48"
- integrity sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==
- dependencies:
- buffer-xor "^1.0.3"
- cipher-base "^1.0.0"
- create-hash "^1.1.0"
- evp_bytestokey "^1.0.3"
- inherits "^2.0.1"
- safe-buffer "^5.0.1"
-
-browserify-cipher@^1.0.0:
- version "1.0.1"
- resolved "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz#8d6474c1b870bfdabcd3bcfcc1934a10e94f15f0"
- integrity sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==
- dependencies:
- browserify-aes "^1.0.4"
- browserify-des "^1.0.0"
- evp_bytestokey "^1.0.0"
-
-browserify-des@^1.0.0:
- version "1.0.2"
- resolved "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz#3af4f1f59839403572f1c66204375f7a7f703e9c"
- integrity sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==
- dependencies:
- cipher-base "^1.0.1"
- des.js "^1.0.0"
- inherits "^2.0.1"
- safe-buffer "^5.1.2"
-
-browserify-rsa@^4.0.0, browserify-rsa@^4.1.0:
- version "4.1.0"
- resolved "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.1.0.tgz#b2fd06b5b75ae297f7ce2dc651f918f5be158c8d"
- integrity sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog==
- dependencies:
- bn.js "^5.0.0"
- randombytes "^2.0.1"
-
-browserify-sign@^4.0.0:
- version "4.2.3"
- resolved "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.3.tgz#7afe4c01ec7ee59a89a558a4b75bd85ae62d4208"
- integrity sha512-JWCZW6SKhfhjJxO8Tyiiy+XYB7cqd2S5/+WeYHsKdNKFlCBhKbblba1A/HN/90YwtxKc8tCErjffZl++UNmGiw==
- dependencies:
- bn.js "^5.2.1"
- browserify-rsa "^4.1.0"
- create-hash "^1.2.0"
- create-hmac "^1.1.7"
- elliptic "^6.5.5"
- hash-base "~3.0"
- inherits "^2.0.4"
- parse-asn1 "^5.1.7"
- readable-stream "^2.3.8"
- safe-buffer "^5.2.1"
-
-browserify-zlib@^0.2.0:
- version "0.2.0"
- resolved "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz#2869459d9aa3be245fe8fe2ca1f46e2e7f54d73f"
- integrity sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==
- dependencies:
- pako "~1.0.5"
-
browserslist@^3.2.6:
version "3.2.8"
resolved "https://registry.npmjs.org/browserslist/-/browserslist-3.2.8.tgz#b0005361d6471f0f5952797a76fc985f1f978fc6"
@@ -4839,20 +5013,6 @@ buffer-from@^1.0.0:
resolved "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5"
integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==
-buffer-xor@^1.0.3:
- version "1.0.3"
- resolved "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz#26e61ed1422fb70dd42e6e36729ed51d855fe8d9"
- integrity sha512-571s0T7nZWK6vB67HI5dyUF7wXiNcfaPPPTl6zYCNApANjIvYJTg7hlud/+cJpdAhS7dVzqMLmfhfHR3rAcOjQ==
-
-buffer@^4.3.0:
- version "4.9.2"
- resolved "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz#230ead344002988644841ab0244af8c44bbe3ef8"
- integrity sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==
- dependencies:
- base64-js "^1.0.2"
- ieee754 "^1.1.4"
- isarray "^1.0.0"
-
buffer@^5.5.0:
version "5.7.1"
resolved "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz#ba62e7c13133053582197160851a8f648e99eed0"
@@ -4861,11 +5021,6 @@ buffer@^5.5.0:
base64-js "^1.3.1"
ieee754 "^1.1.13"
-builtin-status-codes@^3.0.0:
- version "3.0.0"
- resolved "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz#85982878e21b98e1c66425e03d0174788f569ee8"
- integrity sha512-HpGFw18DgFWlncDfjTa2rcQ4W88O1mC8e8yZ2AvQY5KDaktSTwo+KRf6nHK6FRI5FyRyb/5T6+TSxfP7QyGsmQ==
-
builtins@^1.0.3:
version "1.0.3"
resolved "https://registry.npmjs.org/builtins/-/builtins-1.0.3.tgz#cb94faeb61c8696451db36534e1422f94f0aee88"
@@ -4886,42 +5041,6 @@ bytes@3.1.2:
resolved "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5"
integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==
-cacache@^12.0.2:
- version "12.0.4"
- resolved "https://registry.npmjs.org/cacache/-/cacache-12.0.4.tgz#668bcbd105aeb5f1d92fe25570ec9525c8faa40c"
- integrity sha512-a0tMB40oefvuInr4Cwb3GerbL9xTj1D5yg0T5xrjGCGyfvbxseIXX7BAO/u/hIXdafzOI5JC3wDwHyf24buOAQ==
- dependencies:
- bluebird "^3.5.5"
- chownr "^1.1.1"
- figgy-pudding "^3.5.1"
- glob "^7.1.4"
- graceful-fs "^4.1.15"
- infer-owner "^1.0.3"
- lru-cache "^5.1.1"
- mississippi "^3.0.0"
- mkdirp "^0.5.1"
- move-concurrently "^1.0.1"
- promise-inflight "^1.0.1"
- rimraf "^2.6.3"
- ssri "^6.0.1"
- unique-filename "^1.1.1"
- y18n "^4.0.0"
-
-cache-base@^1.0.1:
- version "1.0.1"
- resolved "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz#0a7f46416831c8b662ee36fe4e7c59d76f666ab2"
- integrity sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==
- dependencies:
- collection-visit "^1.0.0"
- component-emitter "^1.2.1"
- get-value "^2.0.6"
- has-value "^1.0.0"
- isobject "^3.0.1"
- set-value "^2.0.0"
- to-object-path "^0.3.0"
- union-value "^1.0.0"
- unset-value "^1.0.0"
-
calculate-cache-key-for-tree@2.0.0, calculate-cache-key-for-tree@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/calculate-cache-key-for-tree/-/calculate-cache-key-for-tree-2.0.0.tgz#7ac57f149a4188eacb0a45b210689215d3fef8d6"
@@ -4936,6 +5055,14 @@ calculate-cache-key-for-tree@^1.1.0:
dependencies:
json-stable-stringify "^1.0.1"
+call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6"
+ integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==
+ dependencies:
+ es-errors "^1.3.0"
+ function-bind "^1.1.2"
+
call-bind@^1.0.2, call-bind@^1.0.5, call-bind@^1.0.6, call-bind@^1.0.7:
version "1.0.7"
resolved "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9"
@@ -4957,11 +5084,6 @@ callsites@^3.0.0, callsites@^3.1.0:
resolved "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73"
integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==
-camelcase-css@^2.0.1:
- version "2.0.1"
- resolved "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz#ee978f6947914cc30c6b44741b6ed1df7f043fd5"
- integrity sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==
-
camelcase@^5.3.1:
version "5.3.1"
resolved "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320"
@@ -5072,44 +5194,12 @@ charm@^1.0.0:
dependencies:
inherits "^2.0.1"
-"chokidar@>=3.0.0 <4.0.0", chokidar@^3.4.1, chokidar@^3.5.3:
- version "3.6.0"
- resolved "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz#197c6cc669ef2a8dc5e7b4d97ee4e092c3eb0d5b"
- integrity sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==
- dependencies:
- anymatch "~3.1.2"
- braces "~3.0.2"
- glob-parent "~5.1.2"
- is-binary-path "~2.1.0"
- is-glob "~4.0.1"
- normalize-path "~3.0.0"
- readdirp "~3.6.0"
- optionalDependencies:
- fsevents "~2.3.2"
-
-chokidar@^2.1.8:
- version "2.1.8"
- resolved "https://registry.npmjs.org/chokidar/-/chokidar-2.1.8.tgz#804b3a7b6a99358c3c5c61e71d8728f041cff917"
- integrity sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==
+chokidar@^4.0.0:
+ version "4.0.3"
+ resolved "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz#7be37a4c03c9aee1ecfe862a4a23b2c70c205d30"
+ integrity sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==
dependencies:
- anymatch "^2.0.0"
- async-each "^1.0.1"
- braces "^2.3.2"
- glob-parent "^3.1.0"
- inherits "^2.0.3"
- is-binary-path "^1.0.0"
- is-glob "^4.0.0"
- normalize-path "^3.0.0"
- path-is-absolute "^1.0.0"
- readdirp "^2.2.1"
- upath "^1.1.1"
- optionalDependencies:
- fsevents "^1.2.7"
-
-chownr@^1.1.1:
- version "1.1.4"
- resolved "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz#6fc9d7b42d32a583596337666e7d08084da2cc6b"
- integrity sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==
+ readdirp "^4.0.1"
chrome-trace-event@^1.0.2:
version "1.0.4"
@@ -5121,24 +5211,6 @@ ci-info@^2.0.0:
resolved "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz#67a9e964be31a51e15e5010d58e6f12834002f46"
integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==
-cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3:
- version "1.0.4"
- resolved "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz#8760e4ecc272f4c363532f926d874aae2c1397de"
- integrity sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==
- dependencies:
- inherits "^2.0.1"
- safe-buffer "^5.0.1"
-
-class-utils@^0.3.5:
- version "0.3.6"
- resolved "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz#f93369ae8b9a7ce02fd41faad0ca83033190c463"
- integrity sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==
- dependencies:
- arr-union "^3.1.0"
- define-property "^0.2.5"
- isobject "^3.0.0"
- static-extend "^0.1.1"
-
cldr-core@^36.0.0:
version "36.0.0"
resolved "https://registry.npmjs.org/cldr-core/-/cldr-core-36.0.0.tgz#1d2148ed6802411845baeeb21432d7bbfde7d4f7"
@@ -5229,6 +5301,11 @@ cli-width@^3.0.0:
resolved "https://registry.npmjs.org/cli-width/-/cli-width-3.0.0.tgz#a2f48437a2caa9a22436e794bf071ec9e61cedf6"
integrity sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==
+clipboard-polyfill@^4.1.1:
+ version "4.1.1"
+ resolved "https://registry.npmjs.org/clipboard-polyfill/-/clipboard-polyfill-4.1.1.tgz#eaf074f91c0a55aa4c12fcfd4862d2cfb9a0cab9"
+ integrity sha512-nbvNLrcX0zviek5QHLFRAaLrx8y/s8+RF2stH43tuS+kP5XlHMrcD0UGBWq43Hwp6WuuK7KefRMP56S45ibZkA==
+
clipboard@^2.0.11:
version "2.0.11"
resolved "https://registry.npmjs.org/clipboard/-/clipboard-2.0.11.tgz#62180360b97dd668b6b3a84ec226975762a70be5"
@@ -5262,19 +5339,20 @@ code-point-at@^1.0.0:
resolved "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77"
integrity sha512-RpAVKQA5T63xEj6/giIbUEtZwJ4UFIc3ZtvEkiaUERylqe8xb5IvqcgOurZLahv93CLKfxcw5YI+DZcUBRyLXA==
-codemirror@5.58.2, codemirror@~5.15.0:
+codemirror-lang-hcl@^0.0.0-beta.2:
+ version "0.0.0-beta.2"
+ resolved "https://registry.npmjs.org/codemirror-lang-hcl/-/codemirror-lang-hcl-0.0.0-beta.2.tgz#05ab6dfa6399c5987942e2eb5051f3426d44aad5"
+ integrity sha512-R3ew7Z2EYTdHTMXsWKBW9zxnLoLPYO+CrAa3dPZjXLrIR96Q3GR4cwJKF7zkSsujsnWgwRQZonyWpXYXfhQYuQ==
+ dependencies:
+ "@codemirror/language" "^6.0.0"
+ "@lezer/highlight" "^1.0.0"
+ "@lezer/lr" "^1.0.0"
+
+codemirror@5.58.2:
version "5.58.2"
resolved "https://registry.npmjs.org/codemirror/-/codemirror-5.58.2.tgz#ed54a1796de1498688bea1cdd4e9eeb187565d1b"
integrity sha512-K/hOh24cCwRutd1Mk3uLtjWzNISOkm4fvXiMO7LucCrqbh6aJDdtqUziim3MZUI6wOY0rvY1SlL1Ork01uMy6w==
-collection-visit@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz#4bc0373c164bc3291b4d368c829cf1a80a59dca0"
- integrity sha512-lNkKvzEeMBBjUGHZ+q6z9pSJla0KWAQPvtzhEV9+iGyQYG+pBpl7xKDhxoNSOZH2hhv0v5k0y2yAM4o4SjoSkw==
- dependencies:
- map-visit "^1.0.0"
- object-visit "^1.0.0"
-
color-convert@^1.9.0:
version "1.9.3"
resolved "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8"
@@ -5348,7 +5426,7 @@ commander@^2.20.0, commander@^2.6.0:
resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33"
integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==
-commander@^4.0.0, commander@^4.1.1:
+commander@^4.1.1:
version "4.1.1"
resolved "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz#9fd602bd936294e9e9ef46a3f4d6964044b18068"
integrity sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==
@@ -5378,11 +5456,6 @@ compare-versions@^3.6.0:
resolved "https://registry.npmjs.org/compare-versions/-/compare-versions-3.6.0.tgz#1a5689913685e5a87637b8d3ffca75514ec41d62"
integrity sha512-W6Af2Iw1z4CB7q4uU4hv646dW9GQuBM+YpC0UvUCWSD8w90SJjp+ujJuXaEMtAXBtSqGfMPuFOVn4/+FlaqfBA==
-component-emitter@^1.2.1:
- version "1.3.1"
- resolved "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.1.tgz#ef1d5796f7d93f135ee6fb684340b26403c97d17"
- integrity sha512-T0+barUSQRTUQASh8bx02dl+DhF54GtIDY13Y3m9oWTklKbb3Wv974meRpeZ3lp1JpLVECWWNHC4vaG2XHXouQ==
-
compressible@~2.0.16:
version "2.0.18"
resolved "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz#af53cca6b070d4c3c0750fbd77286a6d7cc46fba"
@@ -5408,16 +5481,6 @@ concat-map@0.0.1:
resolved "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==
-concat-stream@^1.5.0:
- version "1.6.2"
- resolved "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz#904bdf194cd3122fc675c77fc4ac3d4ff0fd1a34"
- integrity sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==
- dependencies:
- buffer-from "^1.0.0"
- inherits "^2.0.3"
- readable-stream "^2.2.2"
- typedarray "^0.0.6"
-
configstore@^5.0.1:
version "5.0.1"
resolved "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz#d365021b5df4b98cdd187d6a3b0e3f6a7cc5ed96"
@@ -5440,11 +5503,6 @@ connect@^3.6.6:
parseurl "~1.3.3"
utils-merge "1.0.1"
-console-browserify@^1.1.0:
- version "1.2.0"
- resolved "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz#67063cef57ceb6cf4993a2ab3a55840ae8c49336"
- integrity sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==
-
console-control-strings@^1.0.0, console-control-strings@^1.1.0, console-control-strings@~1.1.0:
version "1.1.0"
resolved "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e"
@@ -5468,11 +5526,6 @@ consolidate@^0.16.0:
dependencies:
bluebird "^3.7.2"
-constants-browserify@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz#c20b96d8c617748aaf1c16021760cd27fcb8cb75"
- integrity sha512-xFxOwqIzR/e1k1gLiWEophSCMqXcwVHIH7akf7b/vxcUeGunlj3hvZaaqxwHsTgn+IndtkQJgSztIDWeumWJDQ==
-
"consul-acls@file:packages/consul-acls":
version "0.1.0"
@@ -5543,28 +5596,11 @@ cookie@~0.4.1:
resolved "https://registry.npmjs.org/cookie/-/cookie-0.4.2.tgz#0e41f24de5ecf317947c82fc789e06a884824432"
integrity sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA==
-copy-concurrently@^1.0.0:
- version "1.0.5"
- resolved "https://registry.npmjs.org/copy-concurrently/-/copy-concurrently-1.0.5.tgz#92297398cae34937fcafd6ec8139c18051f0b5e0"
- integrity sha512-f2domd9fsVDFtaFcbaRZuYXwtdmnzqbADSwhSWYxYB/Q8zsdUUFMXVRwXGDMWmbEzAn1kdRrtI1T/KTFOL4X2A==
- dependencies:
- aproba "^1.1.1"
- fs-write-stream-atomic "^1.0.8"
- iferr "^0.1.5"
- mkdirp "^0.5.1"
- rimraf "^2.5.4"
- run-queue "^1.0.0"
-
copy-dereference@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/copy-dereference/-/copy-dereference-1.0.0.tgz#6b131865420fd81b413ba994b44d3655311152b6"
integrity sha512-40TSLuhhbiKeszZhK9LfNdazC67Ue4kq/gGwN5sdxEUWPXTIMmKmGmgD9mPfNKVAeecEW+NfEIpBaZoACCQLLw==
-copy-descriptor@^0.1.0:
- version "0.1.1"
- resolved "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz#676f6eb3c39997c2ee1ac3a924fd6124748f578d"
- integrity sha512-XgZ0pFcakEUlbwQEVNg3+QAis1FyTL3Qel9FYy8pSkQqoG3PNoT0bOCQtOXcOkur21r2Eq2kI+IE+gsmAEVlYw==
-
core-js-compat@^3.31.0, core-js-compat@^3.36.1:
version "3.37.1"
resolved "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.37.1.tgz#c844310c7852f4bdf49b8d339730b97e17ff09ee"
@@ -5608,36 +5644,10 @@ cosmiconfig@^7.0.0:
path-type "^4.0.0"
yaml "^1.10.0"
-create-ecdh@^4.0.0:
- version "4.0.4"
- resolved "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz#d6e7f4bffa66736085a0762fd3a632684dabcc4e"
- integrity sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==
- dependencies:
- bn.js "^4.1.0"
- elliptic "^6.5.3"
-
-create-hash@^1.1.0, create-hash@^1.1.2, create-hash@^1.2.0:
- version "1.2.0"
- resolved "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz#889078af11a63756bcfb59bd221996be3a9ef196"
- integrity sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==
- dependencies:
- cipher-base "^1.0.1"
- inherits "^2.0.1"
- md5.js "^1.3.4"
- ripemd160 "^2.0.1"
- sha.js "^2.4.0"
-
-create-hmac@^1.1.0, create-hmac@^1.1.4, create-hmac@^1.1.7:
- version "1.1.7"
- resolved "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz#69170c78b3ab957147b2b8b04572e47ead2243ff"
- integrity sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==
- dependencies:
- cipher-base "^1.0.3"
- create-hash "^1.1.0"
- inherits "^2.0.1"
- ripemd160 "^2.0.0"
- safe-buffer "^5.0.1"
- sha.js "^2.4.8"
+crelt@^1.0.5, crelt@^1.0.6:
+ version "1.0.6"
+ resolved "https://registry.npmjs.org/crelt/-/crelt-1.0.6.tgz#7cc898ea74e190fb6ef9dae57f8f81cf7302df72"
+ integrity sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==
cross-spawn@^6.0.0, cross-spawn@^6.0.5:
version "6.0.5"
@@ -5659,23 +5669,6 @@ cross-spawn@^7.0.0, cross-spawn@^7.0.2, cross-spawn@^7.0.3:
shebang-command "^2.0.0"
which "^2.0.1"
-crypto-browserify@^3.11.0:
- version "3.12.0"
- resolved "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz#396cf9f3137f03e4b8e532c58f698254e00f80ec"
- integrity sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==
- dependencies:
- browserify-cipher "^1.0.0"
- browserify-sign "^4.0.0"
- create-ecdh "^4.0.0"
- create-hash "^1.1.0"
- create-hmac "^1.1.0"
- diffie-hellman "^5.0.0"
- inherits "^2.0.1"
- pbkdf2 "^3.0.3"
- public-encrypt "^4.0.0"
- randombytes "^2.0.0"
- randomfill "^1.0.3"
-
crypto-random-string@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz#ef2a7a966ec11083388369baa02ebead229b30d5"
@@ -5741,10 +5734,10 @@ cssstyle@^2.3.0:
dependencies:
cssom "~0.3.6"
-cyclist@^1.0.1:
- version "1.0.2"
- resolved "https://registry.npmjs.org/cyclist/-/cyclist-1.0.2.tgz#673b5f233bf34d8e602b949429f8171d9121bea3"
- integrity sha512-0sVXIohTfLqVIW3kb/0n6IiWF3Ifj5nm2XaSrLq2DI6fKIGa2fYAZdk917rUneaeLVpYfFcyXE2ft0fe3remsA==
+csstype@^3.1.3:
+ version "3.1.3"
+ resolved "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz#d80ff294d114fb0e6ac500fbf85b60137d7eff81"
+ integrity sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==
"d3-array@2 - 3", "d3-array@2.10.0 - 3":
version "3.2.4"
@@ -5873,7 +5866,7 @@ dayjs@^1.9.3:
resolved "https://registry.npmjs.org/dayjs/-/dayjs-1.11.11.tgz#dfe0e9d54c5f8b68ccf8ca5f72ac603e7e5ed59e"
integrity sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==
-debug@2.6.9, debug@^2.1.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.3.3, debug@^2.6.8, debug@^2.6.9:
+debug@2.6.9, debug@^2.1.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.6.8, debug@^2.6.9:
version "2.6.9"
resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f"
integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
@@ -5909,6 +5902,14 @@ decode-uri-component@^0.2.0:
resolved "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.2.tgz#e69dbe25d37941171dd540e024c444cd5188e1e9"
integrity sha512-FqUYQ+8o158GyGTrMFJms9qh3CqTKvAqgqsTnkLI8sKu0028orqBhxNMFkFen0zGyg6epACD32pjVk58ngIErQ==
+decorator-transforms@^1.0.1:
+ version "1.2.1"
+ resolved "https://registry.npmjs.org/decorator-transforms/-/decorator-transforms-1.2.1.tgz#d72e39b95c9e3d63465f82b148d021919e9d198f"
+ integrity sha512-UUtmyfdlHvYoX3VSG1w5rbvBQ2r5TX1JsE4hmKU9snleFymadA3VACjl6SRfi9YgBCSjBbfQvR1bs9PRW9yBKw==
+ dependencies:
+ "@babel/plugin-syntax-decorators" "^7.23.3"
+ babel-import-util "^2.0.1"
+
decorator-transforms@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/decorator-transforms/-/decorator-transforms-2.0.0.tgz#4e9178a8905c81ff79f4078dc6dfb716244ecd37"
@@ -5917,6 +5918,14 @@ decorator-transforms@^2.0.0:
"@babel/plugin-syntax-decorators" "^7.23.3"
babel-import-util "^3.0.0"
+decorator-transforms@^2.3.0:
+ version "2.3.0"
+ resolved "https://registry.npmjs.org/decorator-transforms/-/decorator-transforms-2.3.0.tgz#521d0617627e289dc47c2186787ac80390ee988a"
+ integrity sha512-jo8c1ss9yFPudHuYYcrJ9jpkDZIoi+lOGvt+Uyp9B+dz32i50icRMx9Bfa8hEt7TnX1FyKWKkjV+cUdT/ep2kA==
+ dependencies:
+ "@babel/plugin-syntax-decorators" "^7.23.3"
+ babel-import-util "^3.0.0"
+
dedent@^0.7.0:
version "0.7.0"
resolved "https://registry.npmjs.org/dedent/-/dedent-0.7.0.tgz#2495ddbaf6eb874abb0e1be9df22d2e5a544326c"
@@ -5981,28 +5990,6 @@ define-properties@^1.2.0, define-properties@^1.2.1:
has-property-descriptors "^1.0.0"
object-keys "^1.1.1"
-define-property@^0.2.5:
- version "0.2.5"
- resolved "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz#c35b1ef918ec3c990f9a5bc57be04aacec5c8116"
- integrity sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==
- dependencies:
- is-descriptor "^0.1.0"
-
-define-property@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz#769ebaaf3f4a63aad3af9e8d304c9bbe79bfb0e6"
- integrity sha512-cZTYKFWspt9jZsMscWo8sc/5lbPC9Q0N5nBLgb+Yd915iL3udB1uFgS3B8YCx66UVHq018DAVFoee7x+gxggeA==
- dependencies:
- is-descriptor "^1.0.0"
-
-define-property@^2.0.2:
- version "2.0.2"
- resolved "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz#d459689e8d654ba77e02a817f8710d702cb16e9d"
- integrity sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==
- dependencies:
- is-descriptor "^1.0.2"
- isobject "^3.0.1"
-
defined@^1.0.1:
version "1.0.1"
resolved "https://registry.npmjs.org/defined/-/defined-1.0.1.tgz#c0b9db27bfaffd95d6f61399419b893df0f91ebf"
@@ -6033,14 +6020,6 @@ depd@~1.1.2:
resolved "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9"
integrity sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==
-des.js@^1.0.0:
- version "1.1.0"
- resolved "https://registry.npmjs.org/des.js/-/des.js-1.1.0.tgz#1d37f5766f3bbff4ee9638e871a8768c173b81da"
- integrity sha512-r17GxjhUCjSRy8aiJpr8/UadFIzMzJGexI3Nmz4ADi9LYSFx4gTBp80+NaX/YsXWWLhpZ7v/v/ubEc/bCNfKwg==
- dependencies:
- inherits "^2.0.1"
- minimalistic-assert "^1.0.0"
-
destroy@1.2.0:
version "1.2.0"
resolved "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz#4803735509ad8be552934c67df614f94e66fa015"
@@ -6063,6 +6042,11 @@ detect-indent@^6.0.0:
resolved "https://registry.npmjs.org/detect-indent/-/detect-indent-6.1.0.tgz#592485ebbbf6b3b1ab2be175c8393d04ca0d57e6"
integrity sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA==
+detect-libc@^1.0.3:
+ version "1.0.3"
+ resolved "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b"
+ integrity sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==
+
detect-newline@3.1.0:
version "3.1.0"
resolved "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz#576f5dfc63ae1a192ff192d8ad3af6308991b651"
@@ -6076,16 +6060,6 @@ dezalgo@^1.0.0:
asap "^2.0.0"
wrappy "1"
-dialog-polyfill@^0.5.6:
- version "0.5.6"
- resolved "https://registry.npmjs.org/dialog-polyfill/-/dialog-polyfill-0.5.6.tgz#7507b4c745a82fcee0fa07ce64d835979719599a"
- integrity sha512-ZbVDJI9uvxPAKze6z146rmfUZjBqNEwcnFTVamQzXH+svluiV7swmVIGr7miwADgfgt1G2JQIytypM9fbyhX4w==
-
-didyoumean@^1.2.2:
- version "1.2.2"
- resolved "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz#989346ffe9e839b4555ecf5666edea0d3e8ad037"
- integrity sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==
-
diff@^4.0.2:
version "4.0.2"
resolved "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d"
@@ -6096,15 +6070,6 @@ diff@^5.0.0:
resolved "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz#26ded047cd1179b78b9537d5ef725503ce1ae531"
integrity sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==
-diffie-hellman@^5.0.0:
- version "5.0.3"
- resolved "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz#40e8ee98f55a2149607146921c63e1ae5f3d2875"
- integrity sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==
- dependencies:
- bn.js "^4.1.0"
- miller-rabin "^4.0.0"
- randombytes "^2.0.0"
-
dir-glob@^3.0.1:
version "3.0.1"
resolved "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f"
@@ -6112,11 +6077,6 @@ dir-glob@^3.0.1:
dependencies:
path-type "^4.0.0"
-dlv@^1.1.3:
- version "1.1.3"
- resolved "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz#5c198a8a11453596e751494d49874bc7732f2e79"
- integrity sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==
-
doctoc@^2.0.0:
version "2.2.1"
resolved "https://registry.npmjs.org/doctoc/-/doctoc-2.2.1.tgz#83f6a6bf4df97defbe027c9a82d13091a138ffe2"
@@ -6145,11 +6105,6 @@ dom-serializer@^1.0.1:
domhandler "^4.2.0"
entities "^2.0.0"
-domain-browser@^1.1.1:
- version "1.2.0"
- resolved "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz#3d31f50191a6749dd1375a7f522e823d42e54eda"
- integrity sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==
-
domelementtype@^2.0.1, domelementtype@^2.2.0:
version "2.3.0"
resolved "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz#5c45e8e869952626331d7aab326d01daf65d589d"
@@ -6200,15 +6155,14 @@ dotignore@^0.1.2:
dependencies:
minimatch "^3.0.4"
-duplexify@^3.4.2, duplexify@^3.6.0:
- version "3.7.1"
- resolved "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz#2a4df5317f6ccfd91f86d6fd25d8d8a103b88309"
- integrity sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==
+dunder-proto@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a"
+ integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==
dependencies:
- end-of-stream "^1.0.0"
- inherits "^2.0.1"
- readable-stream "^2.0.0"
- stream-shift "^1.0.0"
+ call-bind-apply-helpers "^1.0.1"
+ es-errors "^1.3.0"
+ gopd "^1.2.0"
editions@^1.1.1:
version "1.3.4"
@@ -6233,10 +6187,10 @@ electron-to-chromium@^1.3.47, electron-to-chromium@^1.4.796:
resolved "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.818.tgz#7762c8bfd15a07c3833b7f5deed990e9e5a4c24f"
integrity sha512-eGvIk2V0dGImV9gWLq8fDfTTsCAeMDwZqEPMr+jMInxZdnp9Us8UpovYpRCf9NQ7VOFgrN2doNSgvISbsbNpxA==
-elliptic@^6.5.3, elliptic@^6.5.5:
- version "6.5.5"
- resolved "https://registry.npmjs.org/elliptic/-/elliptic-6.5.5.tgz#c715e09f78b6923977610d4c2346d6ce22e6dded"
- integrity sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw==
+elliptic@6.6.1:
+ version "6.6.1"
+ resolved "https://registry.npmjs.org/elliptic/-/elliptic-6.6.1.tgz#3b8ffb02670bf69e382c7f65bf524c97c5405c06"
+ integrity sha512-RaddvvMatK2LJHqFJ+YA4WysVN5Ita9E35botqIYspQ4TkRAlCicdzKOjlyv/1Za5RyTNn7di//eEV0uTAfe3g==
dependencies:
bn.js "^4.11.9"
brorand "^1.1.0"
@@ -6246,10 +6200,10 @@ elliptic@^6.5.3, elliptic@^6.5.5:
minimalistic-assert "^1.0.1"
minimalistic-crypto-utils "^1.0.1"
-ember-a11y-refocus@^3.0.2:
- version "3.0.2"
- resolved "https://registry.npmjs.org/ember-a11y-refocus/-/ember-a11y-refocus-3.0.2.tgz#e648c491d3a8d84cb594679bafc8430cd22b2ed4"
- integrity sha512-5T9kAvl0RUBF6SSeaaWpVS2WC8MTktgqiGdLAbxVjT2f2NGrDDPmv7riDVNMsuL5sHRwSKm0EHCIzZ4M3aFMow==
+ember-a11y-refocus@^4.1.4:
+ version "4.1.4"
+ resolved "https://registry.npmjs.org/ember-a11y-refocus/-/ember-a11y-refocus-4.1.4.tgz#ffcabbc91503379cd2c0124cb5f0bc93178098b5"
+ integrity sha512-51tGk30bskObL1LsGZRxzqIxgZhIE8ZvvDYcT1OWphxZlq00+Arz57aMLS4Vz4qhSE40BfeN2qFYP/gXtp9qDA==
dependencies:
ember-cli-babel "^7.26.11"
ember-cli-htmlbars "^6.0.1"
@@ -6269,6 +6223,21 @@ ember-assign-helper@^0.3.0:
ember-cli-babel "^7.19.0"
ember-cli-htmlbars "^4.3.1"
+ember-assign-helper@^0.5.0:
+ version "0.5.1"
+ resolved "https://registry.npmjs.org/ember-assign-helper/-/ember-assign-helper-0.5.1.tgz#5c0dbffe30090df23ad7d6a7595b015beff439a4"
+ integrity sha512-dXHbwlBTJWVjG7k4dhVrT3Gh4nQt6rC2LjyltuPztIhQ+YcPYHMqAPJRJYLGZu16aPSJbaGF8K+u51i7CLzqlQ==
+ dependencies:
+ "@embroider/addon-shim" "^1.8.7"
+
+ember-async-data@^1.0.1:
+ version "1.0.3"
+ resolved "https://registry.npmjs.org/ember-async-data/-/ember-async-data-1.0.3.tgz#4e5afe4c0e05071e02d05724a64af9b909c27c5b"
+ integrity sha512-54OtoQwNi+/ZvPOVuT4t8fcHR9xL8N7kBydzcZSo6BIEsLYeXPi3+jUR8niWjfjXXhKlJ8EWXR0lTeHleTrxbw==
+ dependencies:
+ "@ember/test-waiters" "^3.0.0"
+ "@embroider/addon-shim" "^1.8.6"
+
ember-auto-import@^1.10.1, ember-auto-import@^1.11.3, ember-auto-import@^1.5.3:
version "1.12.2"
resolved "https://registry.npmjs.org/ember-auto-import/-/ember-auto-import-1.12.2.tgz#cc7298ee5c0654b0249267de68fb27a2861c3579"
@@ -6304,7 +6273,7 @@ ember-auto-import@^1.10.1, ember-auto-import@^1.11.3, ember-auto-import@^1.5.3:
walk-sync "^0.3.3"
webpack "^4.43.0"
-ember-auto-import@^2.2.3, ember-auto-import@^2.4.2, ember-auto-import@^2.5.0, ember-auto-import@^2.6.3:
+ember-auto-import@^2.2.3, ember-auto-import@^2.4.2, ember-auto-import@^2.5.0:
version "2.7.4"
resolved "https://registry.npmjs.org/ember-auto-import/-/ember-auto-import-2.7.4.tgz#ca99570eb3d6165968df797a4750aa58073852b5"
integrity sha512-6CdXSegJJc8nwwK7+1lIcBUnMVrJRNd4ZdMgcKbCAwPvcGxMgRVBddSzrX/+q/UuflvTEO26Dk1g7Z6KHMXUhw==
@@ -6344,6 +6313,48 @@ ember-auto-import@^2.2.3, ember-auto-import@^2.4.2, ember-auto-import@^2.5.0, em
typescript-memoize "^1.0.0-alpha.3"
walk-sync "^3.0.0"
+ember-auto-import@^2.6.3:
+ version "2.10.0"
+ resolved "https://registry.npmjs.org/ember-auto-import/-/ember-auto-import-2.10.0.tgz#2a29b82335eba4375d115570cbe836666ed2e7cc"
+ integrity sha512-bcBFDYVTFHyqyq8BNvsj6UO3pE6Uqou/cNmee0WaqBgZ+1nQqFz0UE26usrtnFAT+YaFZSkqF2H36QW84k0/cg==
+ dependencies:
+ "@babel/core" "^7.16.7"
+ "@babel/plugin-proposal-class-properties" "^7.16.7"
+ "@babel/plugin-proposal-decorators" "^7.16.7"
+ "@babel/plugin-proposal-private-methods" "^7.16.7"
+ "@babel/plugin-transform-class-static-block" "^7.16.7"
+ "@babel/preset-env" "^7.16.7"
+ "@embroider/macros" "^1.0.0"
+ "@embroider/shared-internals" "^2.0.0"
+ babel-loader "^8.0.6"
+ babel-plugin-ember-modules-api-polyfill "^3.5.0"
+ babel-plugin-ember-template-compilation "^2.0.1"
+ babel-plugin-htmlbars-inline-precompile "^5.2.1"
+ babel-plugin-syntax-dynamic-import "^6.18.0"
+ broccoli-debug "^0.6.4"
+ broccoli-funnel "^3.0.8"
+ broccoli-merge-trees "^4.2.0"
+ broccoli-plugin "^4.0.0"
+ broccoli-source "^3.0.0"
+ css-loader "^5.2.0"
+ debug "^4.3.1"
+ fs-extra "^10.0.0"
+ fs-tree-diff "^2.0.0"
+ handlebars "^4.3.1"
+ is-subdir "^1.2.0"
+ js-string-escape "^1.0.1"
+ lodash "^4.17.19"
+ mini-css-extract-plugin "^2.5.2"
+ minimatch "^3.0.0"
+ parse5 "^6.0.1"
+ pkg-entry-points "^1.1.0"
+ resolve "^1.20.0"
+ resolve-package-path "^4.0.3"
+ semver "^7.3.4"
+ style-loader "^2.0.0"
+ typescript-memoize "^1.0.0-alpha.3"
+ walk-sync "^3.0.0"
+
ember-basic-dropdown@3.0.21, ember-basic-dropdown@^3.0.21:
version "3.0.21"
resolved "https://registry.npmjs.org/ember-basic-dropdown/-/ember-basic-dropdown-3.0.21.tgz#5711d071966919c9578d2d5ac2c6dcadbb5ea0e0"
@@ -6427,7 +6438,7 @@ ember-cli-babel-plugin-helpers@^1.0.0, ember-cli-babel-plugin-helpers@^1.1.0, em
resolved "https://registry.npmjs.org/ember-cli-babel-plugin-helpers/-/ember-cli-babel-plugin-helpers-1.1.1.tgz#5016b80cdef37036c4282eef2d863e1d73576879"
integrity sha512-sKvOiPNHr5F/60NLd7SFzMpYPte/nnGkq/tMIfXejfKHIhaiIkYFqX8Z9UFTKWLLn+V7NOaby6niNPZUdvKCRw==
-ember-cli-babel@^6.0.0, ember-cli-babel@^6.0.0-beta.4, ember-cli-babel@^6.6.0, ember-cli-babel@^6.8.1, ember-cli-babel@^6.8.2:
+ember-cli-babel@^6.0.0-beta.4, ember-cli-babel@^6.6.0, ember-cli-babel@^6.8.1, ember-cli-babel@^6.8.2:
version "6.18.0"
resolved "https://registry.npmjs.org/ember-cli-babel/-/ember-cli-babel-6.18.0.tgz#3f6435fd275172edeff2b634ee7b29ce74318957"
integrity sha512-7ceC8joNYxY2wES16iIBlbPSxwKDBhYwC8drU3ZEvuPDMwVv1KzxCNu1fvxyFEBWhwaRNTUxSCsEVoTd9nosGA==
@@ -6482,39 +6493,6 @@ ember-cli-babel@^7.0.0, ember-cli-babel@^7.1.3, ember-cli-babel@^7.10.0, ember-c
rimraf "^3.0.1"
semver "^5.5.0"
-ember-cli-babel@^8.2.0:
- version "8.2.0"
- resolved "https://registry.npmjs.org/ember-cli-babel/-/ember-cli-babel-8.2.0.tgz#91e14c22ac22956177002385947724174553d41c"
- integrity sha512-8H4+jQElCDo6tA7CamksE66NqBXWs7VNpS3a738L9pZCjg2kXIX4zoyHzkORUqCtr0Au7YsCnrlAMi1v2ALo7A==
- dependencies:
- "@babel/helper-compilation-targets" "^7.20.7"
- "@babel/plugin-proposal-class-properties" "^7.16.5"
- "@babel/plugin-proposal-decorators" "^7.20.13"
- "@babel/plugin-proposal-private-methods" "^7.16.5"
- "@babel/plugin-proposal-private-property-in-object" "^7.20.5"
- "@babel/plugin-transform-class-static-block" "^7.22.11"
- "@babel/plugin-transform-modules-amd" "^7.20.11"
- "@babel/plugin-transform-runtime" "^7.13.9"
- "@babel/plugin-transform-typescript" "^7.20.13"
- "@babel/preset-env" "^7.20.2"
- "@babel/runtime" "7.12.18"
- amd-name-resolver "^1.3.1"
- babel-plugin-debug-macros "^0.3.4"
- babel-plugin-ember-data-packages-polyfill "^0.1.2"
- babel-plugin-ember-modules-api-polyfill "^3.5.0"
- babel-plugin-module-resolver "^5.0.0"
- broccoli-babel-transpiler "^8.0.0"
- broccoli-debug "^0.6.4"
- broccoli-funnel "^3.0.8"
- broccoli-source "^3.0.1"
- calculate-cache-key-for-tree "^2.0.0"
- clone "^2.1.2"
- ember-cli-babel-plugin-helpers "^1.1.1"
- ember-cli-version-checker "^5.1.2"
- ensure-posix-path "^1.0.2"
- resolve-package-path "^4.0.3"
- semver "^7.3.8"
-
ember-cli-code-coverage@^1.0.0-beta.4:
version "1.0.3"
resolved "https://registry.npmjs.org/ember-cli-code-coverage/-/ember-cli-code-coverage-1.0.3.tgz#9a6e5e6350d70761eba749d68ebe2e0d9aa3492f"
@@ -6617,7 +6595,7 @@ ember-cli-htmlbars@^5.0.0, ember-cli-htmlbars@^5.1.0, ember-cli-htmlbars@^5.1.2,
strip-bom "^4.0.0"
walk-sync "^2.2.0"
-ember-cli-htmlbars@^6.0.0, ember-cli-htmlbars@^6.0.1, ember-cli-htmlbars@^6.1.1, ember-cli-htmlbars@^6.3.0:
+ember-cli-htmlbars@^6.0.0, ember-cli-htmlbars@^6.0.1, ember-cli-htmlbars@^6.1.1, ember-cli-htmlbars@^6.2.0:
version "6.3.0"
resolved "https://registry.npmjs.org/ember-cli-htmlbars/-/ember-cli-htmlbars-6.3.0.tgz#ac85f2bbd09788992ab7f9ca832cd044fb8e5798"
integrity sha512-N9Y80oZfcfWLsqickMfRd9YByVcTGyhYRnYQ2XVPVrp6jyUyOeRWmEAPh7ERSXpp8Ws4hr/JB9QVQrn/yZa+Ag==
@@ -6693,17 +6671,6 @@ ember-cli-path-utils@^1.0.0:
resolved "https://registry.npmjs.org/ember-cli-path-utils/-/ember-cli-path-utils-1.0.0.tgz#4e39af8b55301cddc5017739b77a804fba2071ed"
integrity sha512-Qq0vvquzf4cFHoDZavzkOy3Izc893r/5spspWgyzLCPTaG78fM3HsrjZm7UWEltbXUqwHHYrqZd/R0jS08NqSA==
-ember-cli-postcss@^8.1.0:
- version "8.2.0"
- resolved "https://registry.npmjs.org/ember-cli-postcss/-/ember-cli-postcss-8.2.0.tgz#9cc1fee624d2d13c41633cf32d4e8cb8d5f88eff"
- integrity sha512-S2HQqmNtcezmLSt/OPZKCXg+aRV7yFoZp+tn1HCLSbR/eU95xl7MWxTjbj/wOIGMfhggy/hBT2+STDh8mGuVpw==
- dependencies:
- broccoli-merge-trees "^4.2.0"
- broccoli-postcss "^6.0.1"
- broccoli-postcss-single "^5.0.1"
- ember-cli-babel "^7.26.11"
- merge "^2.1.1"
-
ember-cli-preprocess-registry@^3.3.0:
version "3.3.0"
resolved "https://registry.npmjs.org/ember-cli-preprocess-registry/-/ember-cli-preprocess-registry-3.3.0.tgz#685837a314fbe57224bd54b189f4b9c23907a2de"
@@ -7069,6 +7036,17 @@ ember-concurrency-decorators@^2.0.0:
ember-compatibility-helpers "^1.2.0"
ember-destroyable-polyfill "^2.0.2"
+ember-concurrency@^4.0.4:
+ version "4.0.4"
+ resolved "https://registry.npmjs.org/ember-concurrency/-/ember-concurrency-4.0.4.tgz#1d021e652d159e9bbdc97e9071c7142559531b59"
+ integrity sha512-Y+PwbFE2r3+ANlT0lTBNokLXTRFLV6lnGkZ8u5tDhND5o2wD1wkh9JdP8KZ8aJ+J0dmhncVGQNi+Dbbtc6xTfg==
+ dependencies:
+ "@babel/helper-module-imports" "^7.22.15"
+ "@babel/helper-plugin-utils" "^7.12.13"
+ "@babel/types" "^7.12.13"
+ "@embroider/addon-shim" "^1.8.7"
+ decorator-transforms "^1.0.1"
+
ember-copy@2.0.1:
version "2.0.1"
resolved "https://registry.npmjs.org/ember-copy/-/ember-copy-2.0.1.tgz#13192b12a250324bb4a8b4547a680b113f4e3041"
@@ -7146,13 +7124,12 @@ ember-element-helper@^0.5.5:
ember-cli-babel "^7.17.2"
ember-cli-htmlbars "^5.1.0"
-ember-element-helper@^0.8.5:
- version "0.8.6"
- resolved "https://registry.npmjs.org/ember-element-helper/-/ember-element-helper-0.8.6.tgz#564d63dbbb6130e4c69ff06b3bd8fbfb9cb4787a"
- integrity sha512-WcbkJKgBZypRGwujeiPrQfZRhETVFLR0wvH2UxDaNBhLWncapt6KK+M/2i/eODoAQwgGxziejhXC6Cbqa9zA8g==
+ember-element-helper@^0.8.6:
+ version "0.8.8"
+ resolved "https://registry.npmjs.org/ember-element-helper/-/ember-element-helper-0.8.8.tgz#b7cb5a6450ec00ae6bc4974a5f6d7224aced8f35"
+ integrity sha512-3slTltQV5ke53t3YVP2GYoswsQ6y+lhuVzKmt09tbEx91DapG8I/xa8W5OA0StvcQlavL3/vHrz/vCQEFs8bBA==
dependencies:
"@embroider/addon-shim" "^1.8.3"
- "@embroider/util" "^1.0.0"
ember-exam@^6.1.0:
version "6.1.0"
@@ -7186,14 +7163,23 @@ ember-factory-for-polyfill@^1.3.1:
dependencies:
ember-cli-version-checker "^2.1.0"
-ember-focus-trap@^1.1.0:
- version "1.1.0"
- resolved "https://registry.npmjs.org/ember-focus-trap/-/ember-focus-trap-1.1.0.tgz#e3c47c6e916e838af3884b43e2794e87088d2bac"
- integrity sha512-KxbCKpAJaBVZm+bW4tHPoBJAZThmxa6pI+WQusL+bj0RtAnGUNkWsVy6UBMZ5QqTQzf4EvGHkCVACVp5lbAWMQ==
+ember-focus-trap@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmjs.org/ember-focus-trap/-/ember-focus-trap-1.1.1.tgz#257512ba847a40ea4cf86d83495b4e47c0f9efed"
+ integrity sha512-5tOWu6eV1UoNZE+P9Gl9lJXNrENZVCoOXi52ePb7JOrOZ3ckOk1OkPsFwR4Jym9VJ7vZ6S3Z3D8BrkFa2aCpYw==
dependencies:
"@embroider/addon-shim" "^1.0.0"
focus-trap "^6.7.1"
+ember-functions-as-helper-polyfill@^2.1.2:
+ version "2.1.3"
+ resolved "https://registry.npmjs.org/ember-functions-as-helper-polyfill/-/ember-functions-as-helper-polyfill-2.1.3.tgz#5fc78d222f326ebd2b796241dbd8f70741f53952"
+ integrity sha512-Hte8jfOmSNzrz/vOchf68CGaBWXN2/5qKgFaylqr9omW2i4Wt9JmaBWRkeR0AJ53N57q3DX2TOb166Taq6QjiA==
+ dependencies:
+ ember-cli-babel "^7.26.11"
+ ember-cli-typescript "^5.0.0"
+ ember-cli-version-checker "^5.1.2"
+
ember-get-config@^0.3.0:
version "0.3.0"
resolved "https://registry.npmjs.org/ember-get-config/-/ember-get-config-0.3.0.tgz#a73a1a87b48d9dde4c66a0e52ed5260b8a48cfbd"
@@ -7265,15 +7251,12 @@ ember-intl@^5.7.0:
mkdirp "^1.0.4"
silent-error "^1.1.1"
-ember-keyboard@^8.2.1:
- version "8.2.1"
- resolved "https://registry.npmjs.org/ember-keyboard/-/ember-keyboard-8.2.1.tgz#945a8a71068d81c06ad26851008ef81061db2a59"
- integrity sha512-wT9xpt3GKsiodGZoifKU4OyeRjXWlmKV9ZHHsp6wJBwMFpl4wWPjTNdINxivk2qg/WFNIh8nUiwuG4+soWXPdw==
+ember-lifeline@^7.0.0:
+ version "7.0.0"
+ resolved "https://registry.npmjs.org/ember-lifeline/-/ember-lifeline-7.0.0.tgz#46780c8f832b6c784ee4681b938a1e1437bfa676"
+ integrity sha512-2l51NzgH5vjN972zgbs+32rnXnnEFKB7qsSpJF+lBI4V5TG6DMy4SfowC72ZEuAtS58OVfwITbOO+RnM21EdpA==
dependencies:
- "@embroider/addon-shim" "^1.8.4"
- ember-destroyable-polyfill "^2.0.3"
- ember-modifier "^2.1.2 || ^3.1.0 || ^4.0.0"
- ember-modifier-manager-polyfill "^1.2.0"
+ "@embroider/addon-shim" "^1.6.0"
ember-load-initializers@^2.1.2:
version "2.1.2"
@@ -7333,7 +7316,18 @@ ember-modifier@^2.1.0:
ember-destroyable-polyfill "^2.0.2"
ember-modifier-manager-polyfill "^1.2.0"
-"ember-modifier@^2.1.2 || ^3.1.0 || ^4.0.0", "ember-modifier@^3.2.7 || ^4.0.0", ember-modifier@^4.1.0:
+ember-modifier@^3.2.7:
+ version "3.2.7"
+ resolved "https://registry.npmjs.org/ember-modifier/-/ember-modifier-3.2.7.tgz#f2d35b7c867cbfc549e1acd8d8903c5ecd02ea4b"
+ integrity sha512-ezcPQhH8jUfcJQbbHji4/ZG/h0yyj1jRDknfYue/ypQS8fM8LrGcCMo0rjDZLzL1Vd11InjNs3BD7BdxFlzGoA==
+ dependencies:
+ ember-cli-babel "^7.26.6"
+ ember-cli-normalize-entity-name "^1.0.0"
+ ember-cli-string-utils "^1.1.0"
+ ember-cli-typescript "^5.0.0"
+ ember-compatibility-helpers "^1.2.5"
+
+"ember-modifier@^3.2.7 || ^4.0.0", ember-modifier@^4.1.0:
version "4.2.0"
resolved "https://registry.npmjs.org/ember-modifier/-/ember-modifier-4.2.0.tgz#f99cb817b9b85c5188c63f853cd06aa62e8dde57"
integrity sha512-BJ48eTEGxD8J7+lofwVmee7xDgNDgpr5dd6+MSu4gk+I6xb35099RMNorXY5hjjwMJEyi/IRR6Yn3M7iJMz8Zw==
@@ -7343,16 +7337,15 @@ ember-modifier@^2.1.0:
ember-cli-normalize-entity-name "^1.0.0"
ember-cli-string-utils "^1.1.0"
-ember-modifier@^3.2.7:
- version "3.2.7"
- resolved "https://registry.npmjs.org/ember-modifier/-/ember-modifier-3.2.7.tgz#f2d35b7c867cbfc549e1acd8d8903c5ecd02ea4b"
- integrity sha512-ezcPQhH8jUfcJQbbHji4/ZG/h0yyj1jRDknfYue/ypQS8fM8LrGcCMo0rjDZLzL1Vd11InjNs3BD7BdxFlzGoA==
+ember-modifier@^4.2.0, ember-modifier@^4.2.2:
+ version "4.2.2"
+ resolved "https://registry.npmjs.org/ember-modifier/-/ember-modifier-4.2.2.tgz#ad6a638dc6f82c7086031c97c2de9b094331c756"
+ integrity sha512-pPYBAGyczX0hedGWQFQOEiL9s45KS9efKxJxUQkMLjQyh+1Uef1mcmAGsdw2KmvNupITkE/nXxmVO1kZ9tt3ag==
dependencies:
- ember-cli-babel "^7.26.6"
+ "@embroider/addon-shim" "^1.8.7"
+ decorator-transforms "^2.0.0"
ember-cli-normalize-entity-name "^1.0.0"
ember-cli-string-utils "^1.1.0"
- ember-cli-typescript "^5.0.0"
- ember-compatibility-helpers "^1.2.5"
ember-named-blocks-polyfill@^0.2.5:
version "0.2.5"
@@ -7421,6 +7414,19 @@ ember-power-select@^4.0.0, ember-power-select@^4.0.5:
ember-text-measurer "^0.6.0"
ember-truth-helpers "^2.1.0 || ^3.0.0"
+ember-power-select@^8.7.1:
+ version "8.7.3"
+ resolved "https://registry.npmjs.org/ember-power-select/-/ember-power-select-8.7.3.tgz#01d2977b9e6d65dae2835bfd07d204050147b4b7"
+ integrity sha512-jDUmW2Wy+xtn/BkTGIq1d3NVGanZRbP5bSonIJysZoF9GfcD8W0iVs4Wj7q6CnzPZ/fMH8ZD2/ZQ+gOQBj7ggg==
+ dependencies:
+ "@embroider/addon-shim" "^1.10.0"
+ "@embroider/util" "^1.13.2"
+ decorator-transforms "^2.3.0"
+ ember-assign-helper "^0.5.0"
+ ember-lifeline "^7.0.0"
+ ember-modifier "^4.2.0"
+ ember-truth-helpers "^4.0.3"
+
ember-qunit@^5.1.5:
version "5.1.5"
resolved "https://registry.npmjs.org/ember-qunit/-/ember-qunit-5.1.5.tgz#24a7850f052be24189ff597dfc31b923e684c444"
@@ -7480,14 +7486,15 @@ ember-resolver@^8.0.3:
ember-cli-version-checker "^5.1.2"
resolve "^1.20.0"
-ember-resources@^5.0.1:
- version "5.6.4"
- resolved "https://registry.npmjs.org/ember-resources/-/ember-resources-5.6.4.tgz#1ae05bb5398ab0d8fab8c0925c5bf679ee86e327"
- integrity sha512-ShdosnruPm37jPpzPOgPVelymEDJT/27Jz/j5AGPVAfCaUhRIocTxNMtPx13ox890A2babuPF5M3Ur8UFidqtw==
+ember-resources@^6.0.0:
+ version "6.5.2"
+ resolved "https://registry.npmjs.org/ember-resources/-/ember-resources-6.5.2.tgz#59ebf5bde2dcc6809e4d9820c7cfc0ce0d3487ed"
+ integrity sha512-8JQ9ebTcKjsmhR5AJ7JNiXziuOiILjrEbGRqcFKkTvodK4QdvvOspDz8yejsf/J/1YUMFe4fjJnjqc2wpORX2Q==
dependencies:
"@babel/runtime" "^7.17.8"
"@embroider/addon-shim" "^1.2.0"
- "@embroider/macros" "^1.2.0"
+ "@embroider/macros" "^1.12.3"
+ ember-async-data "^1.0.1"
ember-rfc176-data@^0.3.13, ember-rfc176-data@^0.3.15, ember-rfc176-data@^0.3.17:
version "0.3.18"
@@ -7595,15 +7602,15 @@ ember-stargate@^0.2.0:
ember-in-element-polyfill "^1.0.0"
tracked-maps-and-sets "^2.1.0"
-ember-stargate@^0.4.3:
- version "0.4.3"
- resolved "https://registry.npmjs.org/ember-stargate/-/ember-stargate-0.4.3.tgz#93e92e4928d489557401d70e52b242b38f36f9ab"
- integrity sha512-GeT5n+TT3Lfl335f16fx9ms0Jap+v5LTs8otIaQEGtFbSP5Jj/hlT3JPB9Uo8IDLXdjejxJsKRpCEzRD43g5dg==
+ember-stargate@^0.5.0:
+ version "0.5.0"
+ resolved "https://registry.npmjs.org/ember-stargate/-/ember-stargate-0.5.0.tgz#b50c3831ee11c91518b266c386ff01ecd02967f1"
+ integrity sha512-HYUww+s1M5X4nmErc3VxsCmGAelBrp8AecObadEvO3u6c9cF8RpsMciWpjfvcD94gy0sneIg61S91S4XJaormQ==
dependencies:
"@ember/render-modifiers" "^2.0.0"
"@embroider/addon-shim" "^1.0.0"
"@glimmer/component" "^1.1.2"
- ember-resources "^5.0.1"
+ ember-resources "^6.0.0"
tracked-maps-and-sets "^3.0.1"
ember-string-fns@^1.4.0:
@@ -7621,13 +7628,14 @@ ember-style-modifier@^0.6.0:
ember-cli-babel "^7.21.0"
ember-modifier "^2.1.0"
-ember-style-modifier@^3.0.1:
- version "3.1.1"
- resolved "https://registry.npmjs.org/ember-style-modifier/-/ember-style-modifier-3.1.1.tgz#313269708552c42255806586160411840adc98c5"
- integrity sha512-J91YLKVp3/m7LrcLEWNSG2sJlSFhE5Ny75empU048qYJtdJMe788Ks/EpKEi953o1mJujVRg792YGrwbrpTzNA==
+ember-style-modifier@^4.4.0:
+ version "4.4.0"
+ resolved "https://registry.npmjs.org/ember-style-modifier/-/ember-style-modifier-4.4.0.tgz#2d1fa6a35d41d88612277d7d149f1e569acaf8d3"
+ integrity sha512-gT1ckbhl1KSj5sWTo/8UChj98eZeE+mUmYoXw8VjwJgWP0wiTCibGZjVbC0WlIUd7umxuG61OQ/ivfF+sAiOEQ==
dependencies:
- ember-auto-import "^2.5.0"
- ember-cli-babel "^7.26.11"
+ "@embroider/addon-shim" "^1.8.7"
+ csstype "^3.1.3"
+ decorator-transforms "^2.0.0"
ember-modifier "^3.2.7 || ^4.0.0"
ember-template-lint@^2.0.1:
@@ -7689,13 +7697,21 @@ ember-tracked-storage-polyfill@1.0.0, ember-tracked-storage-polyfill@^1.0.0:
ember-cli-babel "^7.26.3"
ember-cli-htmlbars "^5.7.1"
-"ember-truth-helpers@^2.1.0 || ^3.0.0", ember-truth-helpers@^3.0.0, ember-truth-helpers@^3.1.1:
+"ember-truth-helpers@^2.1.0 || ^3.0.0", ember-truth-helpers@^3.0.0:
version "3.1.1"
resolved "https://registry.npmjs.org/ember-truth-helpers/-/ember-truth-helpers-3.1.1.tgz#434715926d72bcc63b8a115dec09745fda4474dc"
integrity sha512-FHwJAx77aA5q27EhdaaiBFuy9No+8yaWNT5A7zs0sIFCmf14GbcLn69vJEp6mW7vkITezizGAWhw7gL0Wbk7DA==
dependencies:
ember-cli-babel "^7.22.1"
+ember-truth-helpers@^4.0.3:
+ version "4.0.3"
+ resolved "https://registry.npmjs.org/ember-truth-helpers/-/ember-truth-helpers-4.0.3.tgz#02705dc36f2d68f1d4cff0d8226396c8ae5dee2e"
+ integrity sha512-T6Ogd3pk9FxYiZfSxdjgn3Hb3Ksqgw7CD23V9qfig9jktNdkNEHo4+3PA3cSD/+3a2kdH3KmNvKyarVuzdtEkA==
+ dependencies:
+ "@embroider/addon-shim" "^1.8.6"
+ ember-functions-as-helper-polyfill "^2.1.2"
+
ember-validators@~4.0.0:
version "4.0.1"
resolved "https://registry.npmjs.org/ember-validators/-/ember-validators-4.0.1.tgz#13beefdf185b00efd1b60e51b21380686d8994ba"
@@ -7725,7 +7741,7 @@ encodeurl@~1.0.2:
resolved "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59"
integrity sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==
-end-of-stream@^1.0.0, end-of-stream@^1.1.0:
+end-of-stream@^1.1.0:
version "1.4.4"
resolved "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0"
integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==
@@ -7753,7 +7769,7 @@ engine.io@~6.5.2:
engine.io-parser "~5.2.1"
ws "~8.17.1"
-enhanced-resolve@^4.0.0, enhanced-resolve@^4.5.0:
+enhanced-resolve@^4.0.0:
version "4.5.0"
resolved "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.5.0.tgz#2f3cfd84dbe3b487f18f2db2ef1e064a571ca5ec"
integrity sha512-Nv9m36S/vxpsI+Hc4/ZGRs0n9mXqSWGGq49zxb/cJfPAQMbUtttJAlNPS4AQzaBdw/pKskw5bMbekT/Y7W/Wlg==
@@ -7762,10 +7778,10 @@ enhanced-resolve@^4.0.0, enhanced-resolve@^4.5.0:
memory-fs "^0.5.0"
tapable "^1.0.0"
-enhanced-resolve@^5.17.0:
- version "5.17.0"
- resolved "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.0.tgz#d037603789dd9555b89aaec7eb78845c49089bc5"
- integrity sha512-dwDPwZL0dmye8Txp2gzFmA6sxALaSvdRDjPH0viLcKrtlOL3tw62nWWweVD1SdILDTJrbrL6tdWVN58Wo6U3eA==
+enhanced-resolve@^5.17.1:
+ version "5.18.1"
+ resolved "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.1.tgz#728ab082f8b7b6836de51f1637aab5d3b9568faf"
+ integrity sha512-ZSW3ma5GkcQBIpwZTSRAI8N71Uuwgs93IezB7mf7R60tC8ZbJideoDNKjHn2O9KIlx6rkGTTEk1xUCK2E1Y2Yg==
dependencies:
graceful-fs "^4.2.4"
tapable "^2.2.0"
@@ -7803,7 +7819,7 @@ errlop@^2.0.0:
resolved "https://registry.npmjs.org/errlop/-/errlop-2.2.0.tgz#1ff383f8f917ae328bebb802d6ca69666a42d21b"
integrity sha512-e64Qj9+4aZzjzzFpZC7p5kmm/ccCrbLhAJplhsDXQFs87XTsXwOpH4s1Io2s90Tau/8r2j9f4l/thhDevRjzxw==
-errno@^0.1.3, errno@~0.1.7:
+errno@^0.1.3:
version "0.1.8"
resolved "https://registry.npmjs.org/errno/-/errno-0.1.8.tgz#8bb3e9c7d463be4976ff888f76b4809ebc2e811f"
integrity sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A==
@@ -7883,6 +7899,11 @@ es-define-property@^1.0.0:
dependencies:
get-intrinsic "^1.2.4"
+es-define-property@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa"
+ integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==
+
es-errors@^1.2.1, es-errors@^1.3.0:
version "1.3.0"
resolved "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f"
@@ -7915,14 +7936,31 @@ es-object-atoms@^1.0.0:
dependencies:
es-errors "^1.3.0"
+es-object-atoms@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1"
+ integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==
+ dependencies:
+ es-errors "^1.3.0"
+
es-set-tostringtag@^2.0.3:
version "2.0.3"
resolved "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz#8bb60f0a440c2e4281962428438d58545af39777"
integrity sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==
dependencies:
- get-intrinsic "^1.2.4"
+ get-intrinsic "^1.2.4"
+ has-tostringtag "^1.0.2"
+ hasown "^2.0.1"
+
+es-set-tostringtag@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d"
+ integrity sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==
+ dependencies:
+ es-errors "^1.3.0"
+ get-intrinsic "^1.2.6"
has-tostringtag "^1.0.2"
- hasown "^2.0.1"
+ hasown "^2.0.2"
es-to-primitive@^1.2.1:
version "1.2.1"
@@ -8026,14 +8064,6 @@ eslint-scope@5.1.1, eslint-scope@^5.1.1:
esrecurse "^4.3.0"
estraverse "^4.1.1"
-eslint-scope@^4.0.3:
- version "4.0.3"
- resolved "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.3.tgz#ca03833310f6889a3264781aa82e63eb9cfe7848"
- integrity sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==
- dependencies:
- esrecurse "^4.1.0"
- estraverse "^4.1.1"
-
eslint-utils@^2.0.0, eslint-utils@^2.1.0:
version "2.1.0"
resolved "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz#d2de5e03424e707dc10c74068ddedae708741b27"
@@ -8135,7 +8165,7 @@ esquery@^1.4.0:
dependencies:
estraverse "^5.1.0"
-esrecurse@^4.1.0, esrecurse@^4.3.0:
+esrecurse@^4.3.0:
version "4.3.0"
resolved "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz#7ad7964d679abb28bee72cec63758b1c5d2c9921"
integrity sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==
@@ -8177,19 +8207,11 @@ events-to-array@^1.0.1:
resolved "https://registry.npmjs.org/events-to-array/-/events-to-array-1.1.2.tgz#2d41f563e1fe400ed4962fe1a4d5c6a7539df7f6"
integrity sha512-inRWzRY7nG+aXZxBzEqYKB3HPgwflZRopAjDCHv0whhRx+MTUr1ei0ICZUypdyE0HRm4L2d5VEcIqLD6yl+BFA==
-events@^3.0.0, events@^3.2.0:
+events@^3.2.0:
version "3.3.0"
resolved "https://registry.npmjs.org/events/-/events-3.3.0.tgz#31a95ad0a924e2d2c419a813aeb2c4e878ea7400"
integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==
-evp_bytestokey@^1.0.0, evp_bytestokey@^1.0.3:
- version "1.0.3"
- resolved "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz#7fcbdb198dc71959432efe13842684e0525acb02"
- integrity sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==
- dependencies:
- md5.js "^1.3.4"
- safe-buffer "^5.1.1"
-
exec-sh@^0.3.2:
version "0.3.6"
resolved "https://registry.npmjs.org/exec-sh/-/exec-sh-0.3.6.tgz#ff264f9e325519a60cb5e273692943483cca63bc"
@@ -8279,19 +8301,6 @@ exit@^0.1.2:
resolved "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c"
integrity sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==
-expand-brackets@^2.1.4:
- version "2.1.4"
- resolved "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz#b77735e315ce30f6b6eff0f83b04151a22449622"
- integrity sha512-w/ozOKR9Obk3qoWeY/WDi6MFta9AoMR+zud60mdnbniMcBxRuFJyDt2LdX/14A1UABeqk+Uk+LDfUpvoGKppZA==
- dependencies:
- debug "^2.3.3"
- define-property "^0.2.5"
- extend-shallow "^2.0.1"
- posix-character-classes "^0.1.0"
- regex-not "^1.0.0"
- snapdragon "^0.8.1"
- to-regex "^3.0.1"
-
expand-tilde@^2.0.0, expand-tilde@^2.0.2:
version "2.0.2"
resolved "https://registry.npmjs.org/expand-tilde/-/expand-tilde-2.0.2.tgz#97e801aa052df02454de46b02bf621642cdc8502"
@@ -8336,21 +8345,6 @@ express@^4.10.7, express@^4.17.1:
utils-merge "1.0.1"
vary "~1.1.2"
-extend-shallow@^2.0.1:
- version "2.0.1"
- resolved "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz#51af7d614ad9a9f610ea1bafbb989d6b1c56890f"
- integrity sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==
- dependencies:
- is-extendable "^0.1.0"
-
-extend-shallow@^3.0.0, extend-shallow@^3.0.2:
- version "3.0.2"
- resolved "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz#26a71aaf073b39fb2127172746131c2704028db8"
- integrity sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==
- dependencies:
- assign-symbols "^1.0.0"
- is-extendable "^1.0.1"
-
extend@^3.0.0, extend@^3.0.2:
version "3.0.2"
resolved "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa"
@@ -8365,20 +8359,6 @@ external-editor@^3.0.3:
iconv-lite "^0.4.24"
tmp "^0.0.33"
-extglob@^2.0.4:
- version "2.0.4"
- resolved "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz#ad00fe4dc612a9232e8718711dc5cb5ab0285543"
- integrity sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==
- dependencies:
- array-unique "^0.3.2"
- define-property "^1.0.0"
- expand-brackets "^2.1.4"
- extend-shallow "^2.0.1"
- fragment-cache "^0.2.1"
- regex-not "^1.0.0"
- snapdragon "^0.8.1"
- to-regex "^3.0.1"
-
extract-stack@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/extract-stack/-/extract-stack-2.0.0.tgz#11367bc865bfcd9bc0db3123e5edb57786f11f9b"
@@ -8421,7 +8401,7 @@ fast-glob@^2.2.6:
merge2 "^1.2.3"
micromatch "^3.1.10"
-fast-glob@^3.0.3, fast-glob@^3.2.5, fast-glob@^3.2.9, fast-glob@^3.3.0:
+fast-glob@^3.0.3, fast-glob@^3.2.5, fast-glob@^3.2.9:
version "3.3.2"
resolved "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129"
integrity sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==
@@ -8509,11 +8489,6 @@ fb-watchman@^2.0.0:
dependencies:
bser "2.1.1"
-figgy-pudding@^3.5.1:
- version "3.5.2"
- resolved "https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.2.tgz#b4eee8148abb01dcf1d1ac34367d59e12fa61d6e"
- integrity sha512-0btnI/H8f2pavGMN8w40mlSKOfTK2SVJmBfBeVIj3kNw0swwgzyRq0d5TJVOwodFmtvpPeWPN/MCcfuWF0Ezbw==
-
figures@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz#3ab1a2d2a62c8bfb431a0c94cb797a2fce27c962"
@@ -8535,11 +8510,6 @@ file-entry-cache@^6.0.1:
dependencies:
flat-cache "^3.0.4"
-file-uri-to-path@1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz#553a7b8446ff6f684359c445f1e37a05dacc33dd"
- integrity sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==
-
filesize@^4.1.2:
version "4.2.1"
resolved "https://registry.npmjs.org/filesize/-/filesize-4.2.1.tgz#ab1cb2069db5d415911c1a13e144c0e743bc89bc"
@@ -8591,23 +8561,6 @@ find-babel-config@^1.1.0, find-babel-config@^1.2.0:
json5 "^1.0.2"
path-exists "^3.0.0"
-find-babel-config@^2.1.1:
- version "2.1.1"
- resolved "https://registry.npmjs.org/find-babel-config/-/find-babel-config-2.1.1.tgz#93703fc8e068db5e4c57592900c5715dd04b7e5b"
- integrity sha512-5Ji+EAysHGe1OipH7GN4qDjok5Z1uw5KAwDCbicU/4wyTZY7CqOCzcWbG7J5ad9mazq67k89fXlbc1MuIfl9uA==
- dependencies:
- json5 "^2.2.3"
- path-exists "^4.0.0"
-
-find-cache-dir@^2.1.0:
- version "2.1.0"
- resolved "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz#8d0f94cd13fe43c6c7c261a0d86115ca918c05f7"
- integrity sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==
- dependencies:
- commondir "^1.0.1"
- make-dir "^2.0.0"
- pkg-dir "^3.0.0"
-
find-cache-dir@^3.3.1:
version "3.3.2"
resolved "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz#b30c5b6eff0730731aea9bbd9dbecbd80256d64b"
@@ -8754,14 +8707,6 @@ flatted@^3.2.9:
resolved "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz#21db470729a6734d4997002f439cb308987f567a"
integrity sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==
-flush-write-stream@^1.0.0:
- version "1.1.1"
- resolved "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz#8dd7d873a1babc207d94ead0c2e0e44276ebf2e8"
- integrity sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==
- dependencies:
- inherits "^2.0.3"
- readable-stream "^2.3.6"
-
focus-trap@^6.7.1:
version "6.9.4"
resolved "https://registry.npmjs.org/focus-trap/-/focus-trap-6.9.4.tgz#436da1a1d935c48b97da63cd8f361c6f3aa16444"
@@ -8786,19 +8731,16 @@ for-each@^0.3.3:
dependencies:
is-callable "^1.1.3"
-for-in@^1.0.2:
- version "1.0.2"
- resolved "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
- integrity sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==
-
form-data@^3.0.0:
- version "3.0.1"
- resolved "https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz#ebd53791b78356a99af9a300d4282c4d5eb9755f"
- integrity sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==
+ version "3.0.4"
+ resolved "https://registry.npmjs.org/form-data/-/form-data-3.0.4.tgz#938273171d3f999286a4557528ce022dc2c98df1"
+ integrity sha512-f0cRzm6dkyVYV3nPoooP8XlccPQukegwhAnpoLcXy+X+A8KfpGOoXwDr9FLZd3wzgLaBGQBE3lY93Zm/i1JvIQ==
dependencies:
asynckit "^0.4.0"
combined-stream "^1.0.8"
- mime-types "^2.1.12"
+ es-set-tostringtag "^2.1.0"
+ hasown "^2.0.2"
+ mime-types "^2.1.35"
format@^0.2.0:
version "0.2.2"
@@ -8815,26 +8757,11 @@ fraction.js@^4.3.7:
resolved "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz#06ca0085157e42fda7f9e726e79fefc4068840f7"
integrity sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==
-fragment-cache@^0.2.1:
- version "0.2.1"
- resolved "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz#4290fad27f13e89be7f33799c6bc5a0abfff0d19"
- integrity sha512-GMBAbW9antB8iZRHLoGw0b3HANt57diZYFO/HL1JGIC1MjKrdmhxvrJbupnVvpys0zsz7yBApXdQyfepKly2kA==
- dependencies:
- map-cache "^0.2.2"
-
fresh@0.5.2:
version "0.5.2"
resolved "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7"
integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==
-from2@^2.1.0:
- version "2.3.0"
- resolved "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz#8bfb5502bde4a4d36cfdeea007fcca21d7e382af"
- integrity sha512-OMcX/4IC/uqEPVgGeyfN22LJk6AZrMkRZHxcHBMBvHScDGgwTm2GT2Wkgtocyd3JfZffjj2kYUDXXII0Fk9W0g==
- dependencies:
- inherits "^2.0.1"
- readable-stream "^2.0.0"
-
fs-extra@^0.24.0:
version "0.24.0"
resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-0.24.0.tgz#d4e4342a96675cb7846633a6099249332b539952"
@@ -8952,29 +8879,11 @@ fs-updater@^1.0.4:
heimdalljs-logger "^0.1.9"
rimraf "^2.6.2"
-fs-write-stream-atomic@^1.0.8:
- version "1.0.10"
- resolved "https://registry.npmjs.org/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz#b47df53493ef911df75731e70a9ded0189db40c9"
- integrity sha512-gehEzmPn2nAwr39eay+x3X34Ra+M2QlVUTLhkXPjWdeO8RF9kszk116avgBJM3ZyNHgHXBNx+VmPaFC36k0PzA==
- dependencies:
- graceful-fs "^4.1.2"
- iferr "^0.1.5"
- imurmurhash "^0.1.4"
- readable-stream "1 || 2"
-
fs.realpath@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==
-fsevents@^1.2.7:
- version "1.2.13"
- resolved "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz#f325cb0455592428bcf11b383370ef70e3bfcc38"
- integrity sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==
- dependencies:
- bindings "^1.5.0"
- nan "^2.12.1"
-
fsevents@~2.3.2:
version "2.3.3"
resolved "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6"
@@ -9059,6 +8968,22 @@ get-intrinsic@^1.1.3, get-intrinsic@^1.2.1, get-intrinsic@^1.2.2, get-intrinsic@
has-symbols "^1.0.3"
hasown "^2.0.0"
+get-intrinsic@^1.2.6:
+ version "1.3.0"
+ resolved "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01"
+ integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==
+ dependencies:
+ call-bind-apply-helpers "^1.0.2"
+ es-define-property "^1.0.1"
+ es-errors "^1.3.0"
+ es-object-atoms "^1.1.1"
+ function-bind "^1.1.2"
+ get-proto "^1.0.1"
+ gopd "^1.2.0"
+ has-symbols "^1.1.0"
+ hasown "^2.0.2"
+ math-intrinsics "^1.1.0"
+
get-own-enumerable-property-symbols@^3.0.0:
version "3.0.2"
resolved "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz#b5fde77f22cbe35f390b4e089922c50bce6ef664"
@@ -9069,6 +8994,14 @@ get-package-type@^0.1.0:
resolved "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a"
integrity sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==
+get-proto@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1"
+ integrity sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==
+ dependencies:
+ dunder-proto "^1.0.1"
+ es-object-atoms "^1.0.0"
+
get-stdin@^4.0.1:
version "4.0.1"
resolved "https://registry.npmjs.org/get-stdin/-/get-stdin-4.0.1.tgz#b968c6b0a04384324902e8bf1a5df32579a450fe"
@@ -9107,11 +9040,6 @@ get-symbol-description@^1.0.2:
es-errors "^1.3.0"
get-intrinsic "^1.2.4"
-get-value@^2.0.3, get-value@^2.0.6:
- version "2.0.6"
- resolved "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz#dc15ca1c672387ca76bd37ac0a395ba2042a2c28"
- integrity sha512-Ln0UQDlxH1BapMu3GPtf7CuYNwRZf2gwCuPqbyG6pB8WfmFpzqcy4xtAaAMUhnNqjMKTiCPZG2oMT3YSx8U2NA==
-
git-hooks-list@1.0.3:
version "1.0.3"
resolved "https://registry.npmjs.org/git-hooks-list/-/git-hooks-list-1.0.3.tgz#be5baaf78203ce342f2f844a9d2b03dba1b45156"
@@ -9135,20 +9063,13 @@ glob-parent@^3.1.0:
is-glob "^3.1.0"
path-dirname "^1.0.0"
-glob-parent@^5.1.2, glob-parent@~5.1.2:
+glob-parent@^5.1.2:
version "5.1.2"
resolved "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4"
integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==
dependencies:
is-glob "^4.0.1"
-glob-parent@^6.0.2:
- version "6.0.2"
- resolved "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz#6d237d99083950c79290f24c7642a3de9a28f9e3"
- integrity sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==
- dependencies:
- is-glob "^4.0.3"
-
glob-to-regexp@^0.3.0:
version "0.3.0"
resolved "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz#8c5a1494d2066c570cc3bfe4496175acc4d502ab"
@@ -9159,7 +9080,7 @@ glob-to-regexp@^0.4.1:
resolved "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz#c75297087c851b9a578bd217dd59a92f59fe546e"
integrity sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==
-glob@7.2.3, glob@^10.3.10, glob@^5.0.10, glob@^7.0.4, glob@^7.1.1, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@^7.1.6, glob@^7.1.7, glob@^7.2.3, glob@^9.3.3:
+glob@7.2.3, glob@^5.0.10, glob@^7.0.4, glob@^7.1.1, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@^7.1.6, glob@^7.1.7, glob@^7.2.3:
version "7.2.3"
resolved "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b"
integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==
@@ -9266,7 +9187,12 @@ gopd@^1.0.1:
dependencies:
get-intrinsic "^1.1.3"
-graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.3, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.11, graceful-fs@^4.2.4:
+gopd@^1.2.0:
+ version "1.2.0"
+ resolved "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1"
+ integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==
+
+graceful-fs@^4.1.2, graceful-fs@^4.1.3, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.11, graceful-fs@^4.2.4:
version "4.2.11"
resolved "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3"
integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==
@@ -9347,6 +9273,11 @@ has-symbols@^1.0.2, has-symbols@^1.0.3:
resolved "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8"
integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==
+has-symbols@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338"
+ integrity sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==
+
has-tostringtag@^1.0.0, has-tostringtag@^1.0.2:
version "1.0.2"
resolved "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc"
@@ -9359,54 +9290,6 @@ has-unicode@^2.0.0, has-unicode@^2.0.1:
resolved "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
integrity sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==
-has-value@^0.3.1:
- version "0.3.1"
- resolved "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz#7b1f58bada62ca827ec0a2078025654845995e1f"
- integrity sha512-gpG936j8/MzaeID5Yif+577c17TxaDmhuyVgSwtnL/q8UUTySg8Mecb+8Cf1otgLoD7DDH75axp86ER7LFsf3Q==
- dependencies:
- get-value "^2.0.3"
- has-values "^0.1.4"
- isobject "^2.0.0"
-
-has-value@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz#18b281da585b1c5c51def24c930ed29a0be6b177"
- integrity sha512-IBXk4GTsLYdQ7Rvt+GRBrFSVEkmuOUy4re0Xjd9kJSUQpnTrWR4/y9RpfexN9vkAPMFuQoeWKwqzPozRTlasGw==
- dependencies:
- get-value "^2.0.6"
- has-values "^1.0.0"
- isobject "^3.0.0"
-
-has-values@^0.1.4:
- version "0.1.4"
- resolved "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz#6d61de95d91dfca9b9a02089ad384bff8f62b771"
- integrity sha512-J8S0cEdWuQbqD9//tlZxiMuMNmxB8PlEwvYwuxsTmR1G5RXUePEX/SJn7aD0GMLieuZYSwNH0cQuJGwnYunXRQ==
-
-has-values@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz#95b0b63fec2146619a6fe57fe75628d5a39efe4f"
- integrity sha512-ODYZC64uqzmtfGMEAX/FvZiRyWLpAC3vYnNunURUnkGVTS+mI0smVsWaPydRBsE3g+ok7h960jChO8mFcWlHaQ==
- dependencies:
- is-number "^3.0.0"
- kind-of "^4.0.0"
-
-hash-base@^3.0.0:
- version "3.1.0"
- resolved "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz#55c381d9e06e1d2997a883b4a3fddfe7f0d3af33"
- integrity sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==
- dependencies:
- inherits "^2.0.4"
- readable-stream "^3.6.0"
- safe-buffer "^5.2.0"
-
-hash-base@~3.0:
- version "3.0.4"
- resolved "https://registry.npmjs.org/hash-base/-/hash-base-3.0.4.tgz#5fc8686847ecd73499403319a6b0a3f3f6ae4918"
- integrity sha512-EeeoJKjTyt868liAlVmcv2ZsUfGHlE3Q+BICOXcZiwN3osr5Q/zFGYmTJpoIzuaSTAwndFy+GqhEwlU4L3j4Ow==
- dependencies:
- inherits "^2.0.1"
- safe-buffer "^5.0.1"
-
hash-for-dep@^1.0.2, hash-for-dep@^1.2.3, hash-for-dep@^1.4.7, hash-for-dep@^1.5.0, hash-for-dep@^1.5.1:
version "1.5.1"
resolved "https://registry.npmjs.org/hash-for-dep/-/hash-for-dep-1.5.1.tgz#497754b39bee2f1c4ade4521bfd2af0a7c1196e3"
@@ -9626,11 +9509,6 @@ http-proxy@^1.13.1, http-proxy@^1.18.1:
follow-redirects "^1.0.0"
requires-port "^1.0.0"
-https-browserify@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz#ec06c10e0a34c0f2faf199f7fd7fc78fffd03c73"
- integrity sha512-J+FkSdyD+0mA0N+81tMotaRMfSL9SGi+xpD3T6YApKsc3bGSXJlfXri3VyFOeYkfLRQisDk1W+jIFFKBeUBbBg==
-
https-proxy-agent@^5.0.0:
version "5.0.1"
resolved "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz#c59ef224a04fe8b754f3db0063a25ea30d0005d6"
@@ -9682,16 +9560,11 @@ icss-utils@^5.0.0, icss-utils@^5.1.0:
resolved "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz#c6be6858abd013d768e98366ae47e25d5887b1ae"
integrity sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==
-ieee754@^1.1.13, ieee754@^1.1.4:
+ieee754@^1.1.13:
version "1.2.1"
resolved "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352"
integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==
-iferr@^0.1.5:
- version "0.1.5"
- resolved "https://registry.npmjs.org/iferr/-/iferr-0.1.5.tgz#c60eed69e6d8fdb6b3104a1fcbca1c192dc5b501"
- integrity sha512-DUNFN5j7Tln0D+TxzloUjKB+CtVu6myn0JEFak6dG18mNt9YkQ6lzGCdafwofISZ1lLF3xRHJ98VKy9ynkcFaA==
-
ignore@^4.0.6:
version "4.0.6"
resolved "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz#750e3db5862087b4737ebac8207ffd1ef27b25fc"
@@ -9702,10 +9575,10 @@ ignore@^5.1.1, ignore@^5.2.0:
resolved "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz#5073e554cd42c5b33b394375f538b8593e34d4ef"
integrity sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==
-immutable@^4.0.0:
- version "4.3.6"
- resolved "https://registry.npmjs.org/immutable/-/immutable-4.3.6.tgz#6a05f7858213238e587fb83586ffa3b4b27f0447"
- integrity sha512-Ju0+lEMyzMVZarkTn/gqRpdqd5dOPaz1mCZ0SH3JV6iFw81PldE/PEB1hWVEA288HPt4WXW8O7AWxB10M+03QQ==
+immutable@^5.0.2:
+ version "5.1.3"
+ resolved "https://registry.npmjs.org/immutable/-/immutable-5.1.3.tgz#e6486694c8b76c37c063cca92399fa64098634d4"
+ integrity sha512-+chQdDfvscSF1SJqv2gn4SRO2ZyS3xL3r7IW/wWEEzrzLisnOlKiQu5ytC/BVNcS15C39WT2Hg/bjKjDMcu+zg==
import-fresh@^3.0.0, import-fresh@^3.2.1:
version "3.3.0"
@@ -9730,11 +9603,6 @@ indent-string@^4.0.0:
resolved "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz#624f8f4497d619b2d9768531d58f4122854d7251"
integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==
-infer-owner@^1.0.3:
- version "1.0.4"
- resolved "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz#c4cefcaa8e51051c2a40ba2ce8a3d27295af9467"
- integrity sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==
-
inflection@^1.12.0, inflection@~1.13.1:
version "1.13.4"
resolved "https://registry.npmjs.org/inflection/-/inflection-1.13.4.tgz#65aa696c4e2da6225b148d7a154c449366633a32"
@@ -9861,13 +9729,6 @@ ipaddr.js@1.9.1:
resolved "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz#bff38543eeb8984825079ff3a2a8e6cbd46781b3"
integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==
-is-accessor-descriptor@^1.0.1:
- version "1.0.1"
- resolved "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.1.tgz#3223b10628354644b86260db29b3e693f5ceedd4"
- integrity sha512-YBUanLI8Yoihw923YeFUS5fs0fF2f5TSFTNiYAAzhhDscDa3lEqYuz1pDOEP5KvX94I9ey3vsqjJcLVFVU+3QA==
- dependencies:
- hasown "^2.0.0"
-
is-alphabetical@^1.0.0:
version "1.0.4"
resolved "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz#9e7d6b94916be22153745d184c298cbf986a686d"
@@ -9909,20 +9770,6 @@ is-bigint@^1.0.1:
dependencies:
has-bigints "^1.0.1"
-is-binary-path@^1.0.0:
- version "1.0.1"
- resolved "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898"
- integrity sha512-9fRVlXc0uCxEDj1nQzaWONSpbTfx0FmJfzHF7pwlI8DkWGoHBBea4Pg5Ky0ojwwxQmnSifgbKkI06Qv0Ljgj+Q==
- dependencies:
- binary-extensions "^1.0.0"
-
-is-binary-path@~2.1.0:
- version "2.1.0"
- resolved "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz#ea1f7f3b80f064236e83470f86c09c254fb45b09"
- integrity sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==
- dependencies:
- binary-extensions "^2.0.0"
-
is-boolean-object@^1.1.0:
version "1.1.2"
resolved "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz#5c6dc200246dd9321ae4b885a114bb1f75f63719"
@@ -9931,11 +9778,6 @@ is-boolean-object@^1.1.0:
call-bind "^1.0.2"
has-tostringtag "^1.0.0"
-is-buffer@^1.1.5:
- version "1.1.6"
- resolved "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be"
- integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==
-
is-buffer@^2.0.0:
version "2.0.5"
resolved "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz#ebc252e400d22ff8d77fa09888821a24a658c191"
@@ -9953,13 +9795,6 @@ is-core-module@^2.13.0:
dependencies:
hasown "^2.0.2"
-is-data-descriptor@^1.0.1:
- version "1.0.1"
- resolved "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.1.tgz#2109164426166d32ea38c405c1e0945d9e6a4eeb"
- integrity sha512-bc4NlCDiCr28U4aEsQ3Qs2491gVq4V8G7MQyws968ImqjKuYtTJXrl7Vq7jsN7Ly/C3xj5KWFrY7sHNeDkAzXw==
- dependencies:
- hasown "^2.0.0"
-
is-data-view@^1.0.1:
version "1.0.1"
resolved "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.1.tgz#4b4d3a511b70f3dc26d42c03ca9ca515d847759f"
@@ -9979,39 +9814,11 @@ is-decimal@^1.0.0:
resolved "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz#65a3a5958a1c5b63a706e1b333d7cd9f630d3fa5"
integrity sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==
-is-descriptor@^0.1.0:
- version "0.1.7"
- resolved "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.7.tgz#2727eb61fd789dcd5bdf0ed4569f551d2fe3be33"
- integrity sha512-C3grZTvObeN1xud4cRWl366OMXZTj0+HGyk4hvfpx4ZHt1Pb60ANSXqCK7pdOTeUQpRzECBSTphqvD7U+l22Eg==
- dependencies:
- is-accessor-descriptor "^1.0.1"
- is-data-descriptor "^1.0.1"
-
-is-descriptor@^1.0.0, is-descriptor@^1.0.2:
- version "1.0.3"
- resolved "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.3.tgz#92d27cb3cd311c4977a4db47df457234a13cb306"
- integrity sha512-JCNNGbwWZEVaSPtS45mdtrneRWJFp07LLmykxeFV5F6oBvNF8vHSfJuJgoT472pSfk+Mf8VnlrspaFBHWM8JAw==
- dependencies:
- is-accessor-descriptor "^1.0.1"
- is-data-descriptor "^1.0.1"
-
is-docker@^2.0.0:
version "2.2.1"
resolved "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz#33eeabe23cfe86f14bde4408a02c0cfb853acdaa"
integrity sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==
-is-extendable@^0.1.0, is-extendable@^0.1.1:
- version "0.1.1"
- resolved "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
- integrity sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==
-
-is-extendable@^1.0.1:
- version "1.0.1"
- resolved "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz#a7470f9e426733d81bd81e1155264e3a3507cab4"
- integrity sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==
- dependencies:
- is-plain-object "^2.0.4"
-
is-extglob@^2.1.0, is-extglob@^2.1.1:
version "2.1.1"
resolved "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2"
@@ -10051,7 +9858,7 @@ is-glob@^3.1.0:
dependencies:
is-extglob "^2.1.0"
-is-glob@^4.0.0, is-glob@^4.0.1, is-glob@^4.0.3, is-glob@~4.0.1:
+is-glob@^4.0.0, is-glob@^4.0.1, is-glob@^4.0.3:
version "4.0.3"
resolved "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084"
integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==
@@ -10090,13 +9897,6 @@ is-number-object@^1.0.4:
dependencies:
has-tostringtag "^1.0.0"
-is-number@^3.0.0:
- version "3.0.0"
- resolved "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz#24fd6201a4782cf50561c810276afc7d12d71195"
- integrity sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==
- dependencies:
- kind-of "^3.0.2"
-
is-number@^7.0.0:
version "7.0.0"
resolved "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b"
@@ -10122,13 +9922,6 @@ is-plain-obj@^1.1:
resolved "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e"
integrity sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==
-is-plain-object@^2.0.3, is-plain-object@^2.0.4:
- version "2.0.4"
- resolved "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677"
- integrity sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==
- dependencies:
- isobject "^3.0.1"
-
is-potential-custom-element-name@^1.0.1:
version "1.0.1"
resolved "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz#171ed6f19e3ac554394edf78caa05784a45bebb5"
@@ -10176,6 +9969,13 @@ is-string@^1.0.5, is-string@^1.0.7:
dependencies:
has-tostringtag "^1.0.0"
+is-subdir@^1.2.0:
+ version "1.2.0"
+ resolved "https://registry.npmjs.org/is-subdir/-/is-subdir-1.2.0.tgz#b791cd28fab5202e91a08280d51d9d7254fd20d4"
+ integrity sha512-2AT6j+gXe/1ueqbW6fLZJiIw3F8iXGJtt0yDrZaBhAZEG1raiTxKWU+IPqMCzQAXOUCKdA4UDMgacKH25XG2Cw==
+ dependencies:
+ better-path-resolve "1.0.0"
+
is-symbol@^1.0.2, is-symbol@^1.0.3:
version "1.0.4"
resolved "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz#a6dac93b635b063ca6872236de88910a57af139c"
@@ -10227,16 +10027,11 @@ is-weakset@^2.0.3:
call-bind "^1.0.7"
get-intrinsic "^1.2.4"
-is-windows@^1.0.1, is-windows@^1.0.2:
+is-windows@^1.0.0, is-windows@^1.0.1:
version "1.0.2"
resolved "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d"
integrity sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==
-is-wsl@^1.1.0:
- version "1.1.0"
- resolved "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz#1f16e4aa22b04d1336b66188a66af3c600c3a66d"
- integrity sha512-gfygJYZ2gLTDlmbWMI0CE2MwnFzSN/2SZfkMlItC4K/JBlsWVDB0bO6XhqcY13YXE7iMcAJnzTCJjPiTeJJ0Mw==
-
is-wsl@^2.2.0:
version "2.2.0"
resolved "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz#74a4c76e77ca9fd3f932f290c17ea326cd157271"
@@ -10276,11 +10071,6 @@ isobject@^2.0.0:
dependencies:
isarray "1.0.0"
-isobject@^3.0.0, isobject@^3.0.1:
- version "3.0.1"
- resolved "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df"
- integrity sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==
-
istanbul-lib-coverage@^3.0.0, istanbul-lib-coverage@^3.2.0:
version "3.2.2"
resolved "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz#2d166c4b0644d43a39f04bf6c2edd1e585f31756"
@@ -10332,15 +10122,6 @@ istextorbinary@^2.5.1:
editions "^2.2.0"
textextensions "^2.5.0"
-ivy-codemirror@^2.1.0:
- version "2.1.0"
- resolved "https://registry.npmjs.org/ivy-codemirror/-/ivy-codemirror-2.1.0.tgz#c06f1606c375610bf62b007a21a9e63f5854175e"
- integrity sha512-+Ha6Yf39fiK3dfQD5vlanrQ8GMIf/KVRbxzEzG+AsvAgUNSO8VECCfIRzdHQZcBfi9jNCaT+9q6VQd7mSqNalQ==
- dependencies:
- codemirror "~5.15.0"
- ember-cli-babel "^6.0.0"
- ember-cli-node-assets "^0.2.2"
-
jest-worker@^27.4.5:
version "27.5.1"
resolved "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz#8d146f0900e8973b106b6f73cc1e9a8cb86f8db0"
@@ -10350,11 +10131,6 @@ jest-worker@^27.4.5:
merge-stream "^2.0.0"
supports-color "^8.0.0"
-jiti@^1.21.0:
- version "1.21.6"
- resolved "https://registry.npmjs.org/jiti/-/jiti-1.21.6.tgz#6c7f7398dd4b3142767f9a168af2f317a428d268"
- integrity sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==
-
jquery@^3.4.1, jquery@^3.5.1:
version "3.7.1"
resolved "https://registry.npmjs.org/jquery/-/jquery-3.7.1.tgz#083ef98927c9a6a74d05a6af02806566d16274de"
@@ -10433,6 +10209,11 @@ jsesc@^2.5.0, jsesc@^2.5.1:
resolved "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4"
integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==
+jsesc@^3.0.2:
+ version "3.1.0"
+ resolved "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz#74d335a234f67ed19907fdadfac7ccf9d409825d"
+ integrity sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==
+
jsesc@~0.3.x:
version "0.3.0"
resolved "https://registry.npmjs.org/jsesc/-/jsesc-0.3.0.tgz#1bf5ee63b4539fe2e26d0c1e99c240b97a457972"
@@ -10448,7 +10229,7 @@ json-buffer@3.0.1:
resolved "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13"
integrity sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==
-json-parse-better-errors@^1.0.1, json-parse-better-errors@^1.0.2:
+json-parse-better-errors@^1.0.1:
version "1.0.2"
resolved "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9"
integrity sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==
@@ -10483,19 +10264,7 @@ json-stable-stringify@^1.0.0, json-stable-stringify@^1.0.1:
jsonify "^0.0.1"
object-keys "^1.1.1"
-json5@^0.5.1:
- version "0.5.1"
- resolved "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821"
- integrity sha512-4xrs1aW+6N5DalkqSVA8fxh458CXvR99WU8WLKmq4v8eWAL86Xo3BVqyd3SkA9wEVjCMqyvvRRkshAdOnBp5rw==
-
-json5@^1.0.1, json5@^1.0.2:
- version "1.0.2"
- resolved "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz#63d98d60f21b313b77c4d6da18bfa69d80e1d593"
- integrity sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==
- dependencies:
- minimist "^1.2.0"
-
-json5@^2.1.2, json5@^2.2.3:
+json5@^0.5.1, json5@^1.0.2, json5@^2.1.2, json5@^2.2.3:
version "2.2.3"
resolved "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283"
integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==
@@ -10540,25 +10309,6 @@ keyv@^4.5.3:
dependencies:
json-buffer "3.0.1"
-kind-of@^3.0.2, kind-of@^3.0.3:
- version "3.2.2"
- resolved "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64"
- integrity sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==
- dependencies:
- is-buffer "^1.1.5"
-
-kind-of@^4.0.0:
- version "4.0.0"
- resolved "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz#20813df3d712928b207378691a45066fae72dd57"
- integrity sha512-24XsCxmEbRwEDbz/qz3stgin8TTzZ1ESR56OMCN0ujYg+vRutNSiOj9bHH9u85DKgXguraugV5sFuvbD4FW/hw==
- dependencies:
- is-buffer "^1.1.5"
-
-kind-of@^6.0.2:
- version "6.0.3"
- resolved "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd"
- integrity sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==
-
layout-bin-packer@^1.4.0:
version "1.5.0"
resolved "https://registry.npmjs.org/layout-bin-packer/-/layout-bin-packer-1.5.0.tgz#2e950456083621fe01f82007d896294f5e31e89c"
@@ -10599,16 +10349,6 @@ license-checker@^25.0.1:
spdx-satisfies "^4.0.0"
treeify "^1.1.0"
-lilconfig@^2.1.0:
- version "2.1.0"
- resolved "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz#78e23ac89ebb7e1bfbf25b18043de756548e7f52"
- integrity sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==
-
-lilconfig@^3.0.0:
- version "3.1.2"
- resolved "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz#e4a7c3cb549e3a606c8dcc32e5ae1005e62c05cb"
- integrity sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==
-
line-column@^1.0.2:
version "1.0.2"
resolved "https://registry.npmjs.org/line-column/-/line-column-1.0.2.tgz#d25af2936b6f4849172b312e4792d1d987bc34a2"
@@ -10679,25 +10419,11 @@ load-json-file@^4.0.0:
pify "^3.0.0"
strip-bom "^3.0.0"
-loader-runner@^2.4.0:
- version "2.4.0"
- resolved "https://registry.npmjs.org/loader-runner/-/loader-runner-2.4.0.tgz#ed47066bfe534d7e84c4c7b9998c2a75607d9357"
- integrity sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw==
-
loader-runner@^4.2.0:
version "4.3.0"
resolved "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz#c1b4a163b99f614830353b16755e7149ac2314e1"
integrity sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==
-loader-utils@^1.2.3:
- version "1.4.2"
- resolved "https://registry.npmjs.org/loader-utils/-/loader-utils-1.4.2.tgz#29a957f3a63973883eb684f10ffd3d151fec01a3"
- integrity sha512-I5d00Pd/jwMD2QCduo657+YM/6L3KZu++pmX9VFncxaxvHcru9jx1lBaFft+r4Mt2jK0Yhp41XlRAihzPxHNCg==
- dependencies:
- big.js "^5.2.2"
- emojis-list "^3.0.0"
- json5 "^1.0.1"
-
loader-utils@^2.0.0:
version "2.0.4"
resolved "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz#8b5cb38b5c34a9a018ee1fc0e6a066d1dfcc528c"
@@ -11008,6 +10734,11 @@ lru-cache@^6.0.0:
dependencies:
yallist "^4.0.0"
+luxon@^3.4.2:
+ version "3.6.1"
+ resolved "https://registry.npmjs.org/luxon/-/luxon-3.6.1.tgz#d283ffc4c0076cb0db7885ec6da1c49ba97e47b0"
+ integrity sha512-tJLxrKJhO2ukZ5z0gyjY1zPh3Rh88Ej9P7jNrZiHMUXHae1yvI2imgOZtL1TO8TW6biMMKfTtAOoEJANgtWBMQ==
+
magic-string@^0.25.7:
version "0.25.9"
resolved "https://registry.npmjs.org/magic-string/-/magic-string-0.25.9.tgz#de7f9faf91ef8a1c91d02c2e5314c8277dbcdd1c"
@@ -11015,14 +10746,6 @@ magic-string@^0.25.7:
dependencies:
sourcemap-codec "^1.4.8"
-make-dir@^2.0.0:
- version "2.1.0"
- resolved "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz#5f0310e18b8be898cc07009295a30ae41e91e6f5"
- integrity sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==
- dependencies:
- pify "^4.0.1"
- semver "^5.6.0"
-
make-dir@^3.0.0, make-dir@^3.0.2, make-dir@^3.1.0:
version "3.1.0"
resolved "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz#415e967046b3a7f1d185277d84aa58203726a13f"
@@ -11044,18 +10767,6 @@ makeerror@1.0.12:
dependencies:
tmpl "1.0.5"
-map-cache@^0.2.2:
- version "0.2.2"
- resolved "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz#c32abd0bd6525d9b051645bb4f26ac5dc98a0dbf"
- integrity sha512-8y/eV9QQZCiyn1SprXSrCmqJN0yNRATe+PO8ztwqrvrbdRLA3eYJF0yaR0YayLWkMbsQSKWS9N2gPcGEc4UsZg==
-
-map-visit@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz#ecdca8f13144e660f1b5bd41f12f3479d98dfb8f"
- integrity sha512-4y7uGv8bd2WdM9vpQsiQNo41Ln1NvhvDRuVt0k2JZQ+ezN2uaQes7lZeZ+QQUHOLQAtDaBJ+7wCbi+ab/KFs+w==
- dependencies:
- object-visit "^1.0.0"
-
markdown-it-terminal@0.2.1:
version "0.2.1"
resolved "https://registry.npmjs.org/markdown-it-terminal/-/markdown-it-terminal-0.2.1.tgz#670fd5ea824a7dcaa1591dcbeef28bf70aff1705"
@@ -11100,6 +10811,11 @@ matcher-collection@^2.0.0, matcher-collection@^2.0.1:
"@types/minimatch" "^3.0.3"
minimatch "^3.0.2"
+math-intrinsics@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9"
+ integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==
+
md5-hex@^3.0.1:
version "3.0.1"
resolved "https://registry.npmjs.org/md5-hex/-/md5-hex-3.0.1.tgz#be3741b510591434b2784d79e556eefc2c9a8e5c"
@@ -11107,15 +10823,6 @@ md5-hex@^3.0.1:
dependencies:
blueimp-md5 "^2.10.0"
-md5.js@^1.3.4:
- version "1.3.5"
- resolved "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz#b5d07b8e3216e3e27cd728d72f70d1e6a342005f"
- integrity sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==
- dependencies:
- hash-base "^3.0.0"
- inherits "^2.0.1"
- safe-buffer "^5.1.2"
-
mdast-normalize-headings@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/mdast-normalize-headings/-/mdast-normalize-headings-2.0.0.tgz#378c8161a9f57fcf52a6fd5628507af370c7f8c5"
@@ -11271,14 +10978,6 @@ media-typer@0.3.0:
resolved "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
integrity sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==
-memory-fs@^0.4.1:
- version "0.4.1"
- resolved "https://registry.npmjs.org/memory-fs/-/memory-fs-0.4.1.tgz#3a9a20b8462523e447cfbc7e8bb80ed667bfc552"
- integrity sha512-cda4JKCxReDXFXRqOHPQscuIYg1PvxbE2S2GP45rnwfEK+vZaXC8C1OFvdHIbgw0DLzowXGVoxLaAmlgRy14GQ==
- dependencies:
- errno "^0.1.3"
- readable-stream "^2.0.1"
-
memory-fs@^0.5.0:
version "0.5.0"
resolved "https://registry.npmjs.org/memory-fs/-/memory-fs-0.5.0.tgz#324c01288b88652966d161db77838720845a8e3c"
@@ -11341,11 +11040,6 @@ merge2@^1.2.3, merge2@^1.3.0, merge2@^1.4.1:
resolved "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae"
integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==
-merge@^2.1.1:
- version "2.1.1"
- resolved "https://registry.npmjs.org/merge/-/merge-2.1.1.tgz#59ef4bf7e0b3e879186436e8481c06a6c162ca98"
- integrity sha512-jz+Cfrg9GWOZbQAnDQ4hlVnQky+341Yk5ru8bZSe6sIDTCIg8n9i/u7hSQGSVOF3C7lH6mGtqjkiT9G4wFLL0w==
-
methods@~1.1.2:
version "1.1.2"
resolved "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee"
@@ -11418,47 +11112,20 @@ micromark@^2.11.3, micromark@~2.11.0, micromark@~2.11.3:
debug "^4.0.0"
parse-entities "^2.0.0"
-micromatch@^3.1.10, micromatch@^3.1.4:
- version "3.1.10"
- resolved "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz#70859bc95c9840952f359a068a3fc49f9ecfac23"
- integrity sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==
- dependencies:
- arr-diff "^4.0.0"
- array-unique "^0.3.2"
- braces "^2.3.1"
- define-property "^2.0.2"
- extend-shallow "^3.0.2"
- extglob "^2.0.4"
- fragment-cache "^0.2.1"
- kind-of "^6.0.2"
- nanomatch "^1.2.9"
- object.pick "^1.3.0"
- regex-not "^1.0.0"
- snapdragon "^0.8.1"
- to-regex "^3.0.2"
-
-micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5:
- version "4.0.7"
- resolved "https://registry.npmjs.org/micromatch/-/micromatch-4.0.7.tgz#33e8190d9fe474a9895525f5618eee136d46c2e5"
- integrity sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==
+micromatch@4.0.8, micromatch@^3.1.10, micromatch@^3.1.4, micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5:
+ version "4.0.8"
+ resolved "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202"
+ integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==
dependencies:
braces "^3.0.3"
picomatch "^2.3.1"
-miller-rabin@^4.0.0:
- version "4.0.1"
- resolved "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz#f080351c865b0dc562a8462966daa53543c78a4d"
- integrity sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==
- dependencies:
- bn.js "^4.0.0"
- brorand "^1.0.1"
-
mime-db@1.52.0, "mime-db@>= 1.43.0 < 2":
version "1.52.0"
resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70"
integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
-mime-types@^2.1.12, mime-types@^2.1.18, mime-types@^2.1.26, mime-types@^2.1.27, mime-types@~2.1.24, mime-types@~2.1.34:
+mime-types@^2.1.18, mime-types@^2.1.26, mime-types@^2.1.27, mime-types@^2.1.35, mime-types@~2.1.24, mime-types@~2.1.34:
version "2.1.35"
resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a"
integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
@@ -11505,16 +11172,16 @@ minimatch@^3.0.0, minimatch@^3.0.2, minimatch@^3.0.4, minimatch@^3.1.1:
dependencies:
brace-expansion "^1.1.7"
-minimist@>=1.2.5, minimist@^1.1.1, minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6, minimist@^1.2.8:
- version "1.2.8"
- resolved "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c"
- integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==
-
minimist@^0.2.1:
version "0.2.4"
resolved "https://registry.npmjs.org/minimist/-/minimist-0.2.4.tgz#0085d5501e29033748a2f2a4da0180142697a475"
integrity sha512-Pkrrm8NjyQ8yVt8Am9M+yUt74zE3iokhzbG1bFVNjLB92vwM71hf40RkEsryg98BujhVOncKm/C1xROxZ030LQ==
+minimist@^1.1.1, minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6, minimist@^1.2.8:
+ version "1.2.8"
+ resolved "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c"
+ integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==
+
minipass@^2.2.0:
version "2.9.0"
resolved "https://registry.npmjs.org/minipass/-/minipass-2.9.0.tgz#e713762e7d3e32fed803115cf93e04bca9fcc9a6"
@@ -11523,38 +11190,14 @@ minipass@^2.2.0:
safe-buffer "^5.1.2"
yallist "^3.0.0"
-mississippi@^3.0.0:
- version "3.0.0"
- resolved "https://registry.npmjs.org/mississippi/-/mississippi-3.0.0.tgz#ea0a3291f97e0b5e8776b363d5f0a12d94c67022"
- integrity sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==
- dependencies:
- concat-stream "^1.5.0"
- duplexify "^3.4.2"
- end-of-stream "^1.1.0"
- flush-write-stream "^1.0.0"
- from2 "^2.1.0"
- parallel-transform "^1.1.0"
- pump "^3.0.0"
- pumpify "^1.3.3"
- stream-each "^1.1.0"
- through2 "^2.0.0"
-
-mixin-deep@^1.2.0:
- version "1.3.2"
- resolved "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz#1120b43dc359a785dce65b55b82e257ccf479566"
- integrity sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==
- dependencies:
- for-in "^1.0.2"
- is-extendable "^1.0.1"
-
-mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@^0.5.3, mkdirp@^0.5.5, mkdirp@^0.5.6:
+mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@^0.5.5, mkdirp@^0.5.6:
version "0.5.6"
resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz#7def03d2432dcae4ba1d611445c48396062255f6"
integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==
dependencies:
minimist "^1.2.6"
-mkdirp@^1.0.3, mkdirp@^1.0.4:
+mkdirp@^1.0.4:
version "1.0.4"
resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz#3eb5ed62622756d79a5f0e2a221dfebad75c2f7e"
integrity sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==
@@ -11604,18 +11247,6 @@ mout@^1.0.0:
resolved "https://registry.npmjs.org/mout/-/mout-1.2.4.tgz#9ffd261c4d6509e7ebcbf6b641a89b36ecdf8155"
integrity sha512-mZb9uOruMWgn/fw28DG4/yE3Kehfk1zKCLhuDU2O3vlKdnBBr4XaOCqVTflJ5aODavGUPqFHZgrFX3NJVuxGhQ==
-move-concurrently@^1.0.1:
- version "1.0.1"
- resolved "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz#be2c005fda32e0b29af1f05d7c4b33214c701f92"
- integrity sha512-hdrFxZOycD/g6A6SoI2bB5NA/5NEqD0569+S47WZhPvm46sD50ZHdYaFmnua5lndde9rCHGjmfK7Z8BuCt/PcQ==
- dependencies:
- aproba "^1.1.1"
- copy-concurrently "^1.0.0"
- fs-write-stream-atomic "^1.0.8"
- mkdirp "^0.5.1"
- rimraf "^2.5.4"
- run-queue "^1.0.3"
-
ms@2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8"
@@ -11646,46 +11277,15 @@ mute-stream@0.0.8:
resolved "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d"
integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==
-mz@^2.7.0:
- version "2.7.0"
- resolved "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz#95008057a56cafadc2bc63dde7f9ff6955948e32"
- integrity sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==
- dependencies:
- any-promise "^1.0.0"
- object-assign "^4.0.1"
- thenify-all "^1.0.0"
-
-nan@^2.12.1:
- version "2.20.0"
- resolved "https://registry.npmjs.org/nan/-/nan-2.20.0.tgz#08c5ea813dd54ed16e5bd6505bf42af4f7838ca3"
- integrity sha512-bk3gXBZDGILuuo/6sKtr0DQmSThYHLtNCdSdXk9YkxD/jK6X2vmCyyXBBxyqZ4XcnzTyYEAThfX3DCEnLf6igw==
-
nanoassert@^1.1.0:
version "1.1.0"
resolved "https://registry.npmjs.org/nanoassert/-/nanoassert-1.1.0.tgz#4f3152e09540fde28c76f44b19bbcd1d5a42478d"
integrity sha512-C40jQ3NzfkP53NsO8kEOFd79p4b9kDXQMwgiY1z8ZwrDZgUyom0AHwGegF4Dm99L+YoYhuaB0ceerUcXmqr1rQ==
-nanoid@^3.3.7:
- version "3.3.7"
- resolved "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8"
- integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==
-
-nanomatch@^1.2.9:
- version "1.2.13"
- resolved "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz#b87a8aa4fc0de8fe6be88895b38983ff265bd119"
- integrity sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==
- dependencies:
- arr-diff "^4.0.0"
- array-unique "^0.3.2"
- define-property "^2.0.2"
- extend-shallow "^3.0.2"
- fragment-cache "^0.2.1"
- is-windows "^1.0.2"
- kind-of "^6.0.2"
- object.pick "^1.3.0"
- regex-not "^1.0.0"
- snapdragon "^0.8.1"
- to-regex "^3.0.1"
+nanoid@3.3.8, nanoid@^3.3.7:
+ version "3.3.8"
+ resolved "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf"
+ integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==
natural-compare@^1.4.0:
version "1.4.0"
@@ -11697,7 +11297,7 @@ negotiator@0.6.3:
resolved "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz#58e323a72fedc0d6f9cd4d31fe49f51479590ccd"
integrity sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==
-neo-async@^2.5.0, neo-async@^2.6.1, neo-async@^2.6.2:
+neo-async@^2.6.2:
version "2.6.2"
resolved "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz#b4aafb93e3aeb2d8174ca53cf163ab7d7308305f"
integrity sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==
@@ -11738,6 +11338,11 @@ no-case@^3.0.4:
lower-case "^2.0.2"
tslib "^2.0.3"
+node-addon-api@^7.0.0:
+ version "7.1.1"
+ resolved "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.1.tgz#1aba6693b0f255258a049d621329329322aad558"
+ integrity sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==
+
node-dir@^0.1.17:
version "0.1.17"
resolved "https://registry.npmjs.org/node-dir/-/node-dir-0.1.17.tgz#5f5665d93351335caabef8f1c554516cf5f1e4e5"
@@ -11757,35 +11362,6 @@ node-int64@^0.4.0:
resolved "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b"
integrity sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==
-node-libs-browser@^2.2.1:
- version "2.2.1"
- resolved "https://registry.npmjs.org/node-libs-browser/-/node-libs-browser-2.2.1.tgz#b64f513d18338625f90346d27b0d235e631f6425"
- integrity sha512-h/zcD8H9kaDZ9ALUWwlBUDo6TKF8a7qBSCSEGfjTVIYeqsioSKaAX+BN7NgiMGp6iSIXZ3PxgCu8KS3b71YK5Q==
- dependencies:
- assert "^1.1.1"
- browserify-zlib "^0.2.0"
- buffer "^4.3.0"
- console-browserify "^1.1.0"
- constants-browserify "^1.0.0"
- crypto-browserify "^3.11.0"
- domain-browser "^1.1.1"
- events "^3.0.0"
- https-browserify "^1.0.0"
- os-browserify "^0.3.0"
- path-browserify "0.0.1"
- process "^0.11.10"
- punycode "^1.2.4"
- querystring-es3 "^0.2.0"
- readable-stream "^2.3.3"
- stream-browserify "^2.0.1"
- stream-http "^2.7.2"
- string_decoder "^1.0.0"
- timers-browserify "^2.0.4"
- tty-browserify "0.0.0"
- url "^0.11.0"
- util "^0.11.0"
- vm-browserify "^1.0.1"
-
node-modules-path@^1.0.0, node-modules-path@^1.0.1:
version "1.0.2"
resolved "https://registry.npmjs.org/node-modules-path/-/node-modules-path-1.0.2.tgz#e3acede9b7baf4bc336e3496b58e5b40d517056e"
@@ -11845,7 +11421,7 @@ normalize-path@^2.1.1:
dependencies:
remove-trailing-separator "^1.0.1"
-normalize-path@^3.0.0, normalize-path@~3.0.0:
+normalize-path@^3.0.0:
version "3.0.0"
resolved "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65"
integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==
@@ -11940,30 +11516,16 @@ nwsapi@^2.2.0:
resolved "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.10.tgz#0b77a68e21a0b483db70b11fad055906e867cda8"
integrity sha512-QK0sRs7MKv0tKe1+5uZIQk/C8XGza4DAnztJG8iD+TpJIORARrCxczA738awHrZoHeTjSSoHqao2teO0dC/gFQ==
-object-assign@4.1.1, object-assign@^4, object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1:
+object-assign@4.1.1, object-assign@^4, object-assign@^4.1.0:
version "4.1.1"
resolved "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==
-object-copy@^0.1.0:
- version "0.1.0"
- resolved "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz#7e7d858b781bd7c991a41ba975ed3812754e998c"
- integrity sha512-79LYn6VAb63zgtmAteVOWo9Vdj71ZVBy3Pbse+VqxDpEP83XuujMrGqHIwAXJ5I/aM0zU7dIyIAhifVTPrNItQ==
- dependencies:
- copy-descriptor "^0.1.0"
- define-property "^0.2.5"
- kind-of "^3.0.3"
-
object-hash@^1.3.1:
version "1.3.1"
resolved "https://registry.npmjs.org/object-hash/-/object-hash-1.3.1.tgz#fde452098a951cb145f039bb7d455449ddc126df"
integrity sha512-OSuu/pU4ENM9kmREg0BdNrUDIl1heYa4mBZacJc+vVWz4GtAwu7jO8s4AIt2aGRUTqxykpWzI3Oqnsm13tTMDA==
-object-hash@^3.0.0:
- version "3.0.0"
- resolved "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz#73f97f753e7baffc0e2cc9d6e079079744ac82e9"
- integrity sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==
-
object-inspect@^1.13.1:
version "1.13.2"
resolved "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz#dea0088467fb991e67af4058147a24824a3043ff"
@@ -11982,13 +11544,6 @@ object-keys@^1.1.1:
resolved "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e"
integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==
-object-visit@^1.0.0:
- version "1.0.1"
- resolved "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz#f79c4493af0c5377b59fe39d395e41042dd045bb"
- integrity sha512-GBaMwwAVK9qbQN3Scdo0OyvgPW7l3lnaVMj84uTOZlswkX0KpF6fyDBJhtTthf7pymztoN36/KEr1DyhF96zEA==
- dependencies:
- isobject "^3.0.0"
-
object.assign@^4.1.4, object.assign@^4.1.5:
version "4.1.5"
resolved "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz#3a833f9ab7fdb80fc9e8d2300c803d216d8fdbb0"
@@ -11999,13 +11554,6 @@ object.assign@^4.1.4, object.assign@^4.1.5:
has-symbols "^1.0.3"
object-keys "^1.1.1"
-object.pick@^1.3.0:
- version "1.3.0"
- resolved "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz#87a10ac4c1694bd2e1cbf53591a66141fb5dd747"
- integrity sha512-tqa/UMy/CCoYmj+H5qc07qvSL9dqcs/WZENZ1JbtWBlATP+iVOe778gE6MSijnyCnORzDuX6hU+LA4SZ09YjFQ==
- dependencies:
- isobject "^3.0.1"
-
obliterator@^2.0.0:
version "2.0.4"
resolved "https://registry.npmjs.org/obliterator/-/obliterator-2.0.4.tgz#fa650e019b2d075d745e44f1effeb13a2adbe816"
@@ -12095,11 +11643,6 @@ ora@^5.4.0:
strip-ansi "^6.0.0"
wcwidth "^1.0.1"
-os-browserify@^0.3.0:
- version "0.3.0"
- resolved "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz#854373c7f5c2315914fc9bfc6bd8238fdda1ec27"
- integrity sha512-gjcpUc3clBf9+210TRaDWbf+rZZZEshZ+DlXMRCeAjp0xhTrnQsKHypIy1J3d5hKdUzj69t708EHtU8P6bUn0A==
-
os-homedir@^1.0.0:
version "1.0.2"
resolved "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3"
@@ -12213,20 +11756,6 @@ p-try@^2.0.0:
resolved "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6"
integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==
-pako@~1.0.5:
- version "1.0.11"
- resolved "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz#6c9599d340d54dfd3946380252a35705a6b992bf"
- integrity sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==
-
-parallel-transform@^1.1.0:
- version "1.2.0"
- resolved "https://registry.npmjs.org/parallel-transform/-/parallel-transform-1.2.0.tgz#9049ca37d6cb2182c3b1d2c720be94d14a5814fc"
- integrity sha512-P2vSmIu38uIlvdcU7fDkyrxj33gTUy/ABO5ZUbGowxNCopBq/OoD42bP4UmMrJoPyk4Uqf0mu3mtWBhHCZD8yg==
- dependencies:
- cyclist "^1.0.1"
- inherits "^2.0.3"
- readable-stream "^2.1.5"
-
parent-module@^1.0.0:
version "1.0.1"
resolved "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2"
@@ -12234,22 +11763,10 @@ parent-module@^1.0.0:
dependencies:
callsites "^3.0.0"
-parse-asn1@^5.0.0, parse-asn1@^5.1.7:
- version "5.1.7"
- resolved "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.7.tgz#73cdaaa822125f9647165625eb45f8a051d2df06"
- integrity sha512-CTM5kuWR3sx9IFamcl5ErfPl6ea/N8IYwiJ+vpeB2g+1iknv7zBl5uPwbMbRVznRVbrNY6lGuDoE5b30grmbqg==
- dependencies:
- asn1.js "^4.10.1"
- browserify-aes "^1.2.0"
- evp_bytestokey "^1.0.3"
- hash-base "~3.0"
- pbkdf2 "^3.1.2"
- safe-buffer "^5.2.1"
-
-parse-duration@^1.0.0:
- version "1.1.0"
- resolved "https://registry.npmjs.org/parse-duration/-/parse-duration-1.1.0.tgz#5192084c5d8f2a3fd676d04a451dbd2e05a1819c"
- integrity sha512-z6t9dvSJYaPoQq7quMzdEagSFtpGu+utzHqqxmpVWNNZRIXnvqyCvn9XsTdh7c/w0Bqmdz3RB3YnRaKtpRtEXQ==
+parse-duration@^2.1.3:
+ version "2.1.4"
+ resolved "https://registry.npmjs.org/parse-duration/-/parse-duration-2.1.4.tgz#02918736726f657eaf70b52bb8da7910316df51d"
+ integrity sha512-b98m6MsCh+akxfyoz9w9dt0AlH2dfYLOBss5SdDsr9pkhKNvkWBXU/r8A4ahmIGByBOLV2+4YwfCuFxbDDaGyg==
parse-entities@^2.0.0:
version "2.0.0"
@@ -12306,16 +11823,6 @@ parseurl@~1.3.3:
resolved "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4"
integrity sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==
-pascalcase@^0.1.1:
- version "0.1.1"
- resolved "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz#b363e55e8006ca6fe21784d2db22bd15d7917f14"
- integrity sha512-XHXfu/yOQRy9vYOtUDVMN60OEJjW013GoObG1o+xwQTpB9eYJX/BjXMsdW13ZDPruFhYYn0AG22w0xgQMwl3Nw==
-
-path-browserify@0.0.1:
- version "0.0.1"
- resolved "https://registry.npmjs.org/path-browserify/-/path-browserify-0.0.1.tgz#e6c4ddd7ed3aa27c68a20cc4e50e1a4ee83bbc4a"
- integrity sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ==
-
path-dirname@^1.0.0:
version "1.0.2"
resolved "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz#cc33d24d525e099a5388c0336c6e32b9160609e0"
@@ -12368,15 +11875,15 @@ path-root@^0.1.1:
dependencies:
path-root-regex "^0.1.0"
-path-to-regexp@0.1.7:
- version "0.1.7"
- resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c"
- integrity sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==
+path-to-regexp@0.1.12, path-to-regexp@0.1.7:
+ version "0.1.12"
+ resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz#d5e1a12e478a976d432ef3c58d534b9923164bb7"
+ integrity sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==
-path-to-regexp@^1.7.0:
- version "1.8.0"
- resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz#887b3ba9d84393e87a0a0b9f4cb756198b53548a"
- integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==
+path-to-regexp@1.9.0, path-to-regexp@^1.7.0:
+ version "1.9.0"
+ resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.9.0.tgz#5dc0753acbf8521ca2e0f137b4578b917b10cf24"
+ integrity sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==
dependencies:
isarray "0.0.1"
@@ -12392,23 +11899,17 @@ path-type@^4.0.0:
resolved "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b"
integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==
-pbkdf2@^3.0.3, pbkdf2@^3.1.2:
- version "3.1.2"
- resolved "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.2.tgz#dd822aa0887580e52f1a039dc3eda108efae3075"
- integrity sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA==
- dependencies:
- create-hash "^1.1.2"
- create-hmac "^1.1.4"
- ripemd160 "^2.0.1"
- safe-buffer "^5.0.1"
- sha.js "^2.4.8"
-
picocolors@^1.0.0, picocolors@^1.0.1:
version "1.0.1"
resolved "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz#a8ad579b571952f0e5d25892de5445bcfe25aaa1"
integrity sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==
-picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1:
+picocolors@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b"
+ integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==
+
+picomatch@^2.3.1:
version "2.3.1"
resolved "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42"
integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==
@@ -12418,21 +11919,11 @@ pidtree@^0.3.0:
resolved "https://registry.npmjs.org/pidtree/-/pidtree-0.3.1.tgz#ef09ac2cc0533df1f3250ccf2c4d366b0d12114a"
integrity sha512-qQbW94hLHEqCg7nhby4yRC7G2+jYHY4Rguc2bjw7Uug4GIJuu1tvf2uHaZv5Q8zdt+WKJ6qK1FOI6amaWUo5FA==
-pify@^2.3.0:
- version "2.3.0"
- resolved "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c"
- integrity sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==
-
pify@^3.0.0:
version "3.0.0"
resolved "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176"
integrity sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==
-pify@^4.0.1:
- version "4.0.1"
- resolved "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz#4b2cd25c50d598735c50292224fd8c6df41e3231"
- integrity sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==
-
pinkie-promise@^2.0.0:
version "2.0.1"
resolved "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa"
@@ -12445,18 +11936,6 @@ pinkie@^2.0.0:
resolved "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870"
integrity sha512-MnUuEycAemtSaeFSjXKW/aroV7akBbY+Sv+RkyqFjgAe73F+MR0TBWKBRDkmfWq/HiFmdavfZ1G7h4SPZXaCSg==
-pirates@^4.0.1:
- version "4.0.6"
- resolved "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz#3018ae32ecfcff6c29ba2267cbf21166ac1f36b9"
- integrity sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==
-
-pkg-dir@^3.0.0:
- version "3.0.0"
- resolved "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz#2749020f239ed990881b1f71210d51eb6523bea3"
- integrity sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==
- dependencies:
- find-up "^3.0.0"
-
pkg-dir@^4.1.0:
version "4.2.0"
resolved "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz#f099133df7ede422e81d1d8448270eeb3e4261f3"
@@ -12471,6 +11950,11 @@ pkg-dir@^5.0.0:
dependencies:
find-up "^5.0.0"
+pkg-entry-points@^1.1.0:
+ version "1.1.1"
+ resolved "https://registry.npmjs.org/pkg-entry-points/-/pkg-entry-points-1.1.1.tgz#d5cd87f934e873bf73143ed1d0baf637e5f8fda4"
+ integrity sha512-BhZa7iaPmB4b3vKIACoppyUoYn8/sFs17VJJtzrzPZvEnN2nqrgg911tdL65lA2m1ml6UI3iPeYbZQ4VXpn1mA==
+
pkg-up@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/pkg-up/-/pkg-up-2.0.0.tgz#c819ac728059a461cab1c3889a2be3c49a004d7f"
@@ -12501,40 +11985,11 @@ portfinder@^1.0.28:
debug "^3.2.7"
mkdirp "^0.5.6"
-posix-character-classes@^0.1.0:
- version "0.1.1"
- resolved "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz#01eac0fe3b5af71a2a6c02feabb8c1fef7e00eab"
- integrity sha512-xTgYBc3fuo7Yt7JbiuFxSYGToMoz8fLoE6TC9Wx1P/u+LfeThMOAqmuyECnlBaaJb+u1m9hHiXUEtwW4OzfUJg==
-
possible-typed-array-names@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz#89bb63c6fada2c3e90adc4a647beeeb39cc7bf8f"
integrity sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==
-postcss-import@^15.1.0:
- version "15.1.0"
- resolved "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz#41c64ed8cc0e23735a9698b3249ffdbf704adc70"
- integrity sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==
- dependencies:
- postcss-value-parser "^4.0.0"
- read-cache "^1.0.0"
- resolve "^1.1.7"
-
-postcss-js@^4.0.1:
- version "4.0.1"
- resolved "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz#61598186f3703bab052f1c4f7d805f3991bee9d2"
- integrity sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==
- dependencies:
- camelcase-css "^2.0.1"
-
-postcss-load-config@^4.0.1:
- version "4.0.2"
- resolved "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.2.tgz#7159dcf626118d33e299f485d6afe4aff7c4a3e3"
- integrity sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==
- dependencies:
- lilconfig "^3.0.0"
- yaml "^2.3.4"
-
postcss-modules-extract-imports@^3.0.0:
version "3.1.0"
resolved "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.1.0.tgz#b4497cb85a9c0c4b5aabeb759bb25e8d89f15002"
@@ -12563,14 +12018,7 @@ postcss-modules-values@^4.0.0:
dependencies:
icss-utils "^5.0.0"
-postcss-nested@^6.0.1:
- version "6.0.1"
- resolved "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.1.tgz#f83dc9846ca16d2f4fa864f16e9d9f7d0961662c"
- integrity sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==
- dependencies:
- postcss-selector-parser "^6.0.11"
-
-postcss-selector-parser@^6.0.11, postcss-selector-parser@^6.0.2, postcss-selector-parser@^6.0.4:
+postcss-selector-parser@^6.0.2, postcss-selector-parser@^6.0.4:
version "6.1.0"
resolved "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.0.tgz#49694cb4e7c649299fea510a29fa6577104bcf53"
integrity sha512-UMz42UD0UY0EApS0ZL9o1XnLhSTtvvvLe5Dc2H2O56fvRZi+KulDyf5ctDhhtYJBGKStV2FL1fy6253cmLgqVQ==
@@ -12578,12 +12026,12 @@ postcss-selector-parser@^6.0.11, postcss-selector-parser@^6.0.2, postcss-selecto
cssesc "^3.0.0"
util-deprecate "^1.0.2"
-postcss-value-parser@^4.0.0, postcss-value-parser@^4.1.0, postcss-value-parser@^4.2.0:
+postcss-value-parser@^4.1.0, postcss-value-parser@^4.2.0:
version "4.2.0"
resolved "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz#723c09920836ba6d3e5af019f92bc0971c02e514"
integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==
-postcss@^8.1.4, postcss@^8.2.15, postcss@^8.4.23:
+postcss@^8.2.15:
version "8.4.39"
resolved "https://registry.npmjs.org/postcss/-/postcss-8.4.39.tgz#aa3c94998b61d3a9c259efa51db4b392e1bde0e3"
integrity sha512-0vzE+lAiG7hZl1/9I8yzKLx3aR9Xbof3fBHKunvMfOCYAtMhrsnccJY2iTURb9EZd5+pLuiNV9/c/GZJOHsgIw==
@@ -12629,15 +12077,10 @@ printf@^0.6.1:
resolved "https://registry.npmjs.org/printf/-/printf-0.6.1.tgz#b9afa3d3b55b7f2e8b1715272479fc756ed88650"
integrity sha512-is0ctgGdPJ5951KulgfzvHGwJtZ5ck8l042vRkV6jrkpBzTmb/lueTqguWHy2JfVA+RY6gFVlaZgUS0j7S/dsw==
-prismjs@^1.29.0:
- version "1.29.0"
- resolved "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz#f113555a8fa9b57c35e637bba27509dcf802dd12"
- integrity sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==
-
-prismjs@~1.27.0:
- version "1.27.0"
- resolved "https://registry.npmjs.org/prismjs/-/prismjs-1.27.0.tgz#bb6ee3138a0b438a3653dd4d6ce0cc6510a45057"
- integrity sha512-t13BGPUlFDR7wRB5kQDG4jjl7XeuH6jbJGt11JHPL96qwsEHNX2+68tFXqc1/k+/jALsbSWJKUOT/hcYAZ5LkA==
+prismjs@1.30.0, prismjs@^1.30.0, prismjs@~1.27.0:
+ version "1.30.0"
+ resolved "https://registry.npmjs.org/prismjs/-/prismjs-1.30.0.tgz#d9709969d9d4e16403f6f348c63553b19f0975a9"
+ integrity sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==
private@^0.1.6, private@^0.1.8:
version "0.1.8"
@@ -12656,21 +12099,11 @@ process-relative-require@^1.0.0:
dependencies:
node-modules-path "^1.0.0"
-process@^0.11.10:
- version "0.11.10"
- resolved "https://registry.npmjs.org/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182"
- integrity sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==
-
progress@^2.0.0:
version "2.0.3"
resolved "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz#7e8cf8d8f5b8f239c1bc68beb4eb78567d572ef8"
integrity sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==
-promise-inflight@^1.0.1:
- version "1.0.1"
- resolved "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz#98472870bf228132fcbdd868129bad12c3c029e3"
- integrity sha512-6zWPyEOFaQBJYcGMHBKTKJ3u6TBsnMFOIZSa6ce1e/ZrrsOlnHRHbabMjLiBYKp+n44X9eUI6VUPaukCXHuG4g==
-
promise-map-series@^0.2.1:
version "0.2.3"
resolved "https://registry.npmjs.org/promise-map-series/-/promise-map-series-0.2.3.tgz#c2d377afc93253f6bd03dbb77755eb88ab20a847"
@@ -12713,26 +12146,6 @@ psl@^1.1.33:
resolved "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz#d0df2a137f00794565fcaf3b2c00cd09f8d5a5a7"
integrity sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==
-public-encrypt@^4.0.0:
- version "4.0.3"
- resolved "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz#4fcc9d77a07e48ba7527e7cbe0de33d0701331e0"
- integrity sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==
- dependencies:
- bn.js "^4.1.0"
- browserify-rsa "^4.0.0"
- create-hash "^1.1.0"
- parse-asn1 "^5.0.0"
- randombytes "^2.0.1"
- safe-buffer "^5.1.2"
-
-pump@^2.0.0:
- version "2.0.1"
- resolved "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz#12399add6e4cf7526d973cbc8b5ce2e2908b3909"
- integrity sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==
- dependencies:
- end-of-stream "^1.1.0"
- once "^1.3.1"
-
pump@^3.0.0:
version "3.0.0"
resolved "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64"
@@ -12741,20 +12154,6 @@ pump@^3.0.0:
end-of-stream "^1.1.0"
once "^1.3.1"
-pumpify@^1.3.3:
- version "1.5.1"
- resolved "https://registry.npmjs.org/pumpify/-/pumpify-1.5.1.tgz#36513be246ab27570b1a374a5ce278bfd74370ce"
- integrity sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==
- dependencies:
- duplexify "^3.6.0"
- inherits "^2.0.3"
- pump "^2.0.0"
-
-punycode@^1.2.4, punycode@^1.4.1:
- version "1.4.1"
- resolved "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
- integrity sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==
-
punycode@^2.1.0, punycode@^2.1.1:
version "2.3.1"
resolved "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5"
@@ -12767,18 +12166,20 @@ qs@6.11.0:
dependencies:
side-channel "^1.0.4"
-qs@^6.11.2, qs@^6.4.0:
+qs@6.13.0:
+ version "6.13.0"
+ resolved "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz#6ca3bd58439f7e245655798997787b0d88a51906"
+ integrity sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==
+ dependencies:
+ side-channel "^1.0.6"
+
+qs@^6.4.0:
version "6.12.2"
resolved "https://registry.npmjs.org/qs/-/qs-6.12.2.tgz#5443b587f3bf73ac68968de491e5b25bafe04478"
integrity sha512-x+NLUpx9SYrcwXtX7ob1gnkSems4i/mGZX5SlYxwIau6RrUSODO89TR/XDGGpn5RPWSYIB+aSfuSlV5+CmbTBg==
dependencies:
side-channel "^1.0.6"
-querystring-es3@^0.2.0:
- version "0.2.1"
- resolved "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz#9ec61f79049875707d69414596fd907a4d711e73"
- integrity sha512-773xhDQnZBMFobEiztv8LIl70ch5MSF/jUQVlhwFyBILqq96anmoctVIYz+ZRp0qbCKATTn6ev02M3r7Ga5vqA==
-
querystringify@^2.1.1:
version "2.2.0"
resolved "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz#3345941b4153cb9d082d8eee4cda2016a9aef7f6"
@@ -12817,21 +12218,13 @@ qunit@^2.16.0, qunit@^2.17.2:
node-watch "0.7.3"
tiny-glob "0.2.9"
-randombytes@^2.0.0, randombytes@^2.0.1, randombytes@^2.0.5, randombytes@^2.1.0:
+randombytes@^2.1.0:
version "2.1.0"
resolved "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz#df6f84372f0270dc65cdf6291349ab7a473d4f2a"
integrity sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==
dependencies:
safe-buffer "^5.1.0"
-randomfill@^1.0.3:
- version "1.0.4"
- resolved "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz#c92196fc86ab42be983f1bf31778224931d61458"
- integrity sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==
- dependencies:
- randombytes "^2.0.5"
- safe-buffer "^5.1.0"
-
range-parser@~1.2.1:
version "1.2.1"
resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031"
@@ -12860,13 +12253,6 @@ react-is@^17.0.1:
resolved "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz#e691d4a8e9c789365655539ab372762b0efb54f0"
integrity sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==
-read-cache@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz#e664ef31161166c9751cdbe8dbcf86b5fb58f774"
- integrity sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==
- dependencies:
- pify "^2.3.0"
-
read-installed@~4.0.3:
version "4.0.3"
resolved "https://registry.npmjs.org/read-installed/-/read-installed-4.0.3.tgz#ff9b8b67f187d1e4c29b9feb31f6b223acd19067"
@@ -12900,7 +12286,16 @@ read-pkg@^3.0.0:
normalize-package-data "^2.3.2"
path-type "^3.0.0"
-"readable-stream@1 || 2", readable-stream@^2.0.0, readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.0.6, readable-stream@^2.1.5, readable-stream@^2.2.2, readable-stream@^2.3.3, readable-stream@^2.3.6, readable-stream@^2.3.8, readable-stream@~2.3.6:
+"readable-stream@2 || 3", readable-stream@^3.4.0, readable-stream@^3.6.0:
+ version "3.6.2"
+ resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz#56a9b36ea965c00c5a93ef31eb111a0f11056967"
+ integrity sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==
+ dependencies:
+ inherits "^2.0.3"
+ string_decoder "^1.1.1"
+ util-deprecate "^1.0.1"
+
+readable-stream@^2.0.1, readable-stream@^2.0.6:
version "2.3.8"
resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz#91125e8042bba1b9887f49345f6277027ce8be9b"
integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==
@@ -12913,15 +12308,6 @@ read-pkg@^3.0.0:
string_decoder "~1.1.1"
util-deprecate "~1.0.1"
-"readable-stream@2 || 3", readable-stream@^3.4.0, readable-stream@^3.6.0:
- version "3.6.2"
- resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz#56a9b36ea965c00c5a93ef31eb111a0f11056967"
- integrity sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==
- dependencies:
- inherits "^2.0.3"
- string_decoder "^1.1.1"
- util-deprecate "^1.0.1"
-
readable-stream@~1.0.2:
version "1.0.34"
resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c"
@@ -12942,21 +12328,10 @@ readdir-scoped-modules@^1.0.0:
graceful-fs "^4.1.2"
once "^1.3.0"
-readdirp@^2.2.1:
- version "2.2.1"
- resolved "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz#0e87622a3325aa33e892285caf8b4e846529a525"
- integrity sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==
- dependencies:
- graceful-fs "^4.1.11"
- micromatch "^3.1.10"
- readable-stream "^2.0.2"
-
-readdirp@~3.6.0:
- version "3.6.0"
- resolved "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7"
- integrity sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==
- dependencies:
- picomatch "^2.2.1"
+readdirp@^4.0.1:
+ version "4.1.2"
+ resolved "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz#eb85801435fbf2a7ee58f19e0921b068fc69948d"
+ integrity sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==
recast@^0.18.1:
version "0.18.10"
@@ -13042,14 +12417,6 @@ regenerator-transform@^0.15.2:
dependencies:
"@babel/runtime" "^7.8.4"
-regex-not@^1.0.0, regex-not@^1.0.2:
- version "1.0.2"
- resolved "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz#1f4ece27e00b0b65e0247a6810e6a85d83a5752c"
- integrity sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==
- dependencies:
- extend-shallow "^3.0.2"
- safe-regex "^1.1.0"
-
regexp.prototype.flags@^1.5.1, regexp.prototype.flags@^1.5.2:
version "1.5.2"
resolved "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz#138f644a3350f981a858c44f6bb1a61ff59be334"
@@ -13231,7 +12598,7 @@ reselect@^3.0.1:
resolved "https://registry.npmjs.org/reselect/-/reselect-3.0.1.tgz#efdaa98ea7451324d092b2b2163a6a1d7a9a2147"
integrity sha512-b/6tFZCmRhtBMa4xGqiiRp9jh9Aqi2A687Lo265cN0/QohJQEBPiQ52f4QB6i0eF3yp3hmLL21LSGBcML2dlxA==
-reselect@^4.0.0, reselect@^4.1.7:
+reselect@^4.0.0:
version "4.1.8"
resolved "https://registry.npmjs.org/reselect/-/reselect-4.1.8.tgz#3f5dc671ea168dccdeb3e141236f69f02eaec524"
integrity sha512-ab9EmR80F/zQTMNeneUr4cv+jSwPJgIlvEmVwLerwrWVbpLlBuls9XHzIeTFy4cegU2NHBp3va0LKOzU5qFEYQ==
@@ -13293,12 +12660,12 @@ resolve-path@^1.4.0:
http-errors "~1.6.2"
path-is-absolute "1.0.1"
-resolve-url@^0.2.1:
- version "0.2.1"
- resolved "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a"
- integrity sha512-ZuF55hVUQaaczgOIwqWzkEcEidmlD/xl44x1UZnhOXcYuFN2S6+rcxpG+C1N3So0wvNI3DmJICUFfu2SxhBmvg==
+resolve.exports@^2.0.2:
+ version "2.0.3"
+ resolved "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz#41955e6f1b4013b7586f873749a635dea07ebe3f"
+ integrity sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==
-resolve@^1.1.7, resolve@^1.10.0, resolve@^1.10.1, resolve@^1.11.1, resolve@^1.12.0, resolve@^1.13.1, resolve@^1.14.2, resolve@^1.17.0, resolve@^1.20.0, resolve@^1.22.0, resolve@^1.22.2, resolve@^1.22.8, resolve@^1.3.3, resolve@^1.4.0, resolve@^1.5.0, resolve@^1.8.1:
+resolve@^1.1.7, resolve@^1.10.0, resolve@^1.10.1, resolve@^1.11.1, resolve@^1.12.0, resolve@^1.13.1, resolve@^1.14.2, resolve@^1.17.0, resolve@^1.20.0, resolve@^1.22.0, resolve@^1.3.3, resolve@^1.4.0, resolve@^1.5.0, resolve@^1.8.1:
version "1.22.8"
resolved "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz#b6c87a9f2aa06dfab52e3d70ac8cde321fa5a48d"
integrity sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==
@@ -13332,11 +12699,6 @@ restore-cursor@^3.1.0:
onetime "^5.1.0"
signal-exit "^3.0.2"
-ret@~0.1.10:
- version "0.1.15"
- resolved "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz#b8a4825d5bdb1fc3f6f53c2bc33f81388681c7bc"
- integrity sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==
-
reusify@^1.0.4:
version "1.0.4"
resolved "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76"
@@ -13368,14 +12730,6 @@ rimraf@~2.6.2:
dependencies:
glob "^7.1.3"
-ripemd160@^2.0.0, ripemd160@^2.0.1:
- version "2.0.2"
- resolved "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz#a1c1a6f624751577ba5d07914cbc92850585890c"
- integrity sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==
- dependencies:
- hash-base "^3.0.0"
- inherits "^2.0.1"
-
rollup-pluginutils@^2.8.1:
version "2.8.2"
resolved "https://registry.npmjs.org/rollup-pluginutils/-/rollup-pluginutils-2.8.2.tgz#72f2af0748b592364dbd3389e600e5a9444a351e"
@@ -13383,10 +12737,10 @@ rollup-pluginutils@^2.8.1:
dependencies:
estree-walker "^0.6.1"
-rollup@^2.50.0:
- version "2.79.1"
- resolved "https://registry.npmjs.org/rollup/-/rollup-2.79.1.tgz#bedee8faef7c9f93a2647ac0108748f497f081c7"
- integrity sha512-uKxbd0IhMZOhjAiD5oAFp7BqvkA4Dv47qpOCtaNvng4HBwdbWtdOh8f5nZNuk2rp51PMGk3bzfWu5oayNEuYnw==
+rollup@2.79.2, rollup@^2.50.0:
+ version "2.79.2"
+ resolved "https://registry.npmjs.org/rollup/-/rollup-2.79.2.tgz#f150e4a5db4b121a21a747d762f701e5e9f49090"
+ integrity sha512-fS6iqSPZDs3dr/y7Od6y5nha8dW1YnbgtsyotCVvoFGKbERG++CVRFv1meyGDE1SNItQA8BrnCw7ScdAhRJ3XQ==
optionalDependencies:
fsevents "~2.3.2"
@@ -13422,13 +12776,6 @@ run-parallel@^1.1.9:
dependencies:
queue-microtask "^1.2.2"
-run-queue@^1.0.0, run-queue@^1.0.3:
- version "1.0.3"
- resolved "https://registry.npmjs.org/run-queue/-/run-queue-1.0.3.tgz#e848396f057d223f24386924618e25694161ec47"
- integrity sha512-ntymy489o0/QQplUDnpYAYUsO50K9SBrIVaKCWDOJzYJts0f9WH9RFJkyagebkw5+y1oi00R7ynNW/d12GBumg==
- dependencies:
- aproba "^1.1.1"
-
rxjs@^6.4.0, rxjs@^6.6.0:
version "6.6.7"
resolved "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz#90ac018acabf491bf65044235d5863c4dab804c9"
@@ -13458,7 +12805,7 @@ safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1:
resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d"
integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==
-safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@^5.2.0, safe-buffer@^5.2.1, safe-buffer@~5.2.0:
+safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.1.0, safe-buffer@^5.1.2, safe-buffer@~5.2.0:
version "5.2.1"
resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6"
integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
@@ -13477,13 +12824,6 @@ safe-regex-test@^1.0.3:
es-errors "^1.3.0"
is-regex "^1.1.4"
-safe-regex@^1.1.0:
- version "1.1.0"
- resolved "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz#40a3669f3b077d1e943d44629e157dd48023bf2e"
- integrity sha512-aJXcif4xnaNUzvUuC5gcb46oTS7zvg4jpMTnuqtrEPlR3vFr4pxtdTwaF1Qs3Enjn9HK+ZlwQui+a7z0SywIzg==
- dependencies:
- ret "~0.1.10"
-
"safer-buffer@>= 2.1.2 < 3":
version "2.1.2"
resolved "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a"
@@ -13504,14 +12844,16 @@ sane@^4.0.0, sane@^4.1.0:
minimist "^1.1.1"
walker "~1.0.5"
-sass@^1.28.0, sass@^1.69.5:
- version "1.77.6"
- resolved "https://registry.npmjs.org/sass/-/sass-1.77.6.tgz#898845c1348078c2e6d1b64f9ee06b3f8bd489e4"
- integrity sha512-ByXE1oLD79GVq9Ht1PeHWCPMPB8XHpBuz1r85oByKHjZY6qV6rWnQovQzXJXuQ/XyE1Oj3iPk3lo28uzaRA2/Q==
+sass@^1.83.0, sass@^1.89.2:
+ version "1.89.2"
+ resolved "https://registry.npmjs.org/sass/-/sass-1.89.2.tgz#a771716aeae774e2b529f72c0ff2dfd46c9de10e"
+ integrity sha512-xCmtksBKd/jdJ9Bt9p7nPKiuqrlBMBuuGkQlkhZjjQk3Ty48lv93k5Dq6OPkKt4XwxDJ7tvlfrTa1MPA9bf+QA==
dependencies:
- chokidar ">=3.0.0 <4.0.0"
- immutable "^4.0.0"
+ chokidar "^4.0.0"
+ immutable "^5.0.2"
source-map-js ">=0.6.2 <2.0.0"
+ optionalDependencies:
+ "@parcel/watcher" "^2.4.1"
saxes@^5.0.1:
version "5.0.1"
@@ -13520,15 +12862,6 @@ saxes@^5.0.1:
dependencies:
xmlchars "^2.2.0"
-schema-utils@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz#0b79a93204d7b600d4b2850d1f66c2a34951c770"
- integrity sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==
- dependencies:
- ajv "^6.1.0"
- ajv-errors "^1.0.0"
- ajv-keywords "^3.1.0"
-
schema-utils@^2.6.5:
version "2.7.1"
resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz#1ca4f32d1b24c590c203b8e7a50bf0ea4cd394d7"
@@ -13606,13 +12939,6 @@ send@0.18.0:
range-parser "~1.2.1"
statuses "2.0.1"
-serialize-javascript@^4.0.0:
- version "4.0.0"
- resolved "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-4.0.0.tgz#b525e1238489a5ecfc42afacc3fe99e666f4b1aa"
- integrity sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==
- dependencies:
- randombytes "^2.1.0"
-
serialize-javascript@^6.0.1:
version "6.0.2"
resolved "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz#defa1e055c83bf6d59ea805d8da862254eb6a6c2"
@@ -13657,21 +12983,6 @@ set-function-name@^2.0.1, set-function-name@^2.0.2:
functions-have-names "^1.2.3"
has-property-descriptors "^1.0.2"
-set-value@^2.0.0, set-value@^2.0.1:
- version "2.0.1"
- resolved "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz#a18d40530e6f07de4228c7defe4227af8cad005b"
- integrity sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==
- dependencies:
- extend-shallow "^2.0.1"
- is-extendable "^0.1.1"
- is-plain-object "^2.0.3"
- split-string "^3.0.1"
-
-setimmediate@^1.0.4:
- version "1.0.5"
- resolved "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285"
- integrity sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==
-
setprototypeof@1.1.0:
version "1.1.0"
resolved "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz#d0bd85536887b6fe7c0d818cb962d9d91c54e656"
@@ -13682,14 +12993,6 @@ setprototypeof@1.2.0:
resolved "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz#66c9a24a73f9fc28cbe66b09fed3d33dcaf1b424"
integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==
-sha.js@^2.4.0, sha.js@^2.4.8:
- version "2.4.11"
- resolved "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz#37a5cf0b81ecbc6943de109ba2960d1b26584ae7"
- integrity sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==
- dependencies:
- inherits "^2.0.1"
- safe-buffer "^5.0.1"
-
shebang-command@^1.2.0:
version "1.2.0"
resolved "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea"
@@ -13804,20 +13107,6 @@ snake-case@^3.0.3:
dot-case "^3.0.4"
tslib "^2.0.3"
-snapdragon@^0.8.1:
- version "0.8.2"
- resolved "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz#64922e7c565b0e14204ba1aa7d6964278d25182d"
- integrity sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==
- dependencies:
- base "^0.11.1"
- debug "^2.2.0"
- define-property "^0.2.5"
- extend-shallow "^2.0.1"
- map-cache "^0.2.2"
- source-map "^0.5.6"
- source-map-resolve "^0.5.0"
- use "^3.1.0"
-
socket.io-adapter@~2.5.2:
version "2.5.5"
resolved "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-2.5.5.tgz#c7a1f9c703d7756844751b6ff9abfc1780664082"
@@ -13864,27 +13153,11 @@ sort-package-json@^1.49.0:
is-plain-obj "2.1.0"
sort-object-keys "^1.1.3"
-source-list-map@^2.0.0:
- version "2.0.1"
- resolved "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz#3993bd873bfc48479cca9ea3a547835c7c154b34"
- integrity sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw==
-
"source-map-js@>=0.6.2 <2.0.0", source-map-js@^1.0.1, source-map-js@^1.2.0:
version "1.2.0"
resolved "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz#16b809c162517b5b8c3e7dcd315a2a5c2612b2af"
integrity sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==
-source-map-resolve@^0.5.0:
- version "0.5.3"
- resolved "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz#190866bece7553e1f8f267a2ee82c606b5509a1a"
- integrity sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==
- dependencies:
- atob "^2.1.2"
- decode-uri-component "^0.2.0"
- resolve-url "^0.2.1"
- source-map-url "^0.4.0"
- urix "^0.1.0"
-
source-map-resolve@^0.6.0:
version "0.6.0"
resolved "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.6.0.tgz#3d9df87e236b53f16d01e58150fc7711138e5ed2"
@@ -13900,7 +13173,7 @@ source-map-support@^0.4.15:
dependencies:
source-map "^0.5.6"
-source-map-support@~0.5.12, source-map-support@~0.5.20:
+source-map-support@~0.5.20:
version "0.5.21"
resolved "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz#04fe7c7f9e1ed2d662233c28cb2b35b9f63f6e4f"
integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==
@@ -13913,11 +13186,6 @@ source-map-url@^0.3.0:
resolved "https://registry.npmjs.org/source-map-url/-/source-map-url-0.3.0.tgz#7ecaf13b57bcd09da8a40c5d269db33799d4aaf9"
integrity sha512-QU4fa0D6aSOmrT+7OHpUXw+jS84T0MLaQNtFs8xzLNe6Arj44Magd7WEbyVW5LNYoAPVV35aKs4azxIfVJrToQ==
-source-map-url@^0.4.0:
- version "0.4.1"
- resolved "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.1.tgz#0af66605a745a5a2f91cf1bbf8a7afbc283dec56"
- integrity sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw==
-
source-map@0.4.x, source-map@^0.4.2:
version "0.4.4"
resolved "https://registry.npmjs.org/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b"
@@ -13942,11 +13210,6 @@ source-map@~0.1.x:
dependencies:
amdefine ">=0.0.4"
-source-map@~0.7.4:
- version "0.7.4"
- resolved "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz#a9bbe705c9d8846f4e08ff6765acf0f1b0898656"
- integrity sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==
-
sourcemap-codec@^1.4.8:
version "1.4.8"
resolved "https://registry.npmjs.org/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz#ea804bd94857402e6992d05a38ef1ae35a9ab4c4"
@@ -14021,13 +13284,6 @@ spdx-satisfies@^4.0.0:
spdx-expression-parse "^3.0.0"
spdx-ranges "^2.0.0"
-split-string@^3.0.1:
- version "3.1.0"
- resolved "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz#7cb09dda3a86585705c64b39a6466038682e8fe2"
- integrity sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==
- dependencies:
- extend-shallow "^3.0.0"
-
sprintf-js@^1.1.1:
version "1.1.3"
resolved "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.3.tgz#4914b903a2f8b685d17fdf78a70e917e872e444a"
@@ -14043,13 +13299,6 @@ sri-toolbox@^0.2.0:
resolved "https://registry.npmjs.org/sri-toolbox/-/sri-toolbox-0.2.0.tgz#a7fea5c3fde55e675cf1c8c06f3ebb5c2935835e"
integrity sha512-DQIMWCAr/M7phwo+d3bEfXwSBEwuaJL+SJx9cuqt1Ty7K96ZFoHpYnSbhrQZEr0+0/GtmpKECP8X/R4RyeTAfw==
-ssri@^6.0.1:
- version "6.0.2"
- resolved "https://registry.npmjs.org/ssri/-/ssri-6.0.2.tgz#157939134f20464e7301ddba3e90ffa8f7728ac5"
- integrity sha512-cepbSq/neFK7xB6A50KHN0xHDotYzq58wWCa5LeWqnPrHG8GzfEjO/4O8kpmcGW+oaxkvhEJCWgbgNk4/ZV93Q==
- dependencies:
- figgy-pudding "^3.5.1"
-
stagehand@^1.0.0:
version "1.0.1"
resolved "https://registry.npmjs.org/stagehand/-/stagehand-1.0.1.tgz#0cbca6f906e4a7be36c5830dc31d9cc7091a827e"
@@ -14057,14 +13306,6 @@ stagehand@^1.0.0:
dependencies:
debug "^4.1.0"
-static-extend@^0.1.1:
- version "0.1.2"
- resolved "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz#60809c39cbff55337226fd5e0b520f341f1fb5c6"
- integrity sha512-72E9+uLc27Mt718pMHt9VMNiAL4LMsmDbBva8mxWUCkT07fSzEGMYUCk0XWY6lp0j6RBAG4cJ3mWuZv2OE3s0g==
- dependencies:
- define-property "^0.2.5"
- object-copy "^0.1.0"
-
statuses@2.0.1:
version "2.0.1"
resolved "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63"
@@ -14082,38 +13323,6 @@ stop-iteration-iterator@^1.0.0:
dependencies:
internal-slot "^1.0.4"
-stream-browserify@^2.0.1:
- version "2.0.2"
- resolved "https://registry.npmjs.org/stream-browserify/-/stream-browserify-2.0.2.tgz#87521d38a44aa7ee91ce1cd2a47df0cb49dd660b"
- integrity sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==
- dependencies:
- inherits "~2.0.1"
- readable-stream "^2.0.2"
-
-stream-each@^1.1.0:
- version "1.2.3"
- resolved "https://registry.npmjs.org/stream-each/-/stream-each-1.2.3.tgz#ebe27a0c389b04fbcc233642952e10731afa9bae"
- integrity sha512-vlMC2f8I2u/bZGqkdfLQW/13Zihpej/7PmSiMQsbYddxuTsJp8vRe2x2FvVExZg7FaOds43ROAuFJwPR4MTZLw==
- dependencies:
- end-of-stream "^1.1.0"
- stream-shift "^1.0.0"
-
-stream-http@^2.7.2:
- version "2.8.3"
- resolved "https://registry.npmjs.org/stream-http/-/stream-http-2.8.3.tgz#b2d242469288a5a27ec4fe8933acf623de6514fc"
- integrity sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==
- dependencies:
- builtin-status-codes "^3.0.0"
- inherits "^2.0.1"
- readable-stream "^2.3.6"
- to-arraybuffer "^1.0.0"
- xtend "^4.0.0"
-
-stream-shift@^1.0.0:
- version "1.0.3"
- resolved "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.3.tgz#85b8fab4d71010fc3ba8772e8046cc49b8a3864b"
- integrity sha512-76ORR0DO1o1hlKwTbi/DM3EXWGf3ZJYO8cXX5RJwnul2DEg2oyoZyjLNoQM8WsvZiFKCRfC1O0J7iCvie3RZmQ==
-
string-argv@0.3.1:
version "0.3.1"
resolved "https://registry.npmjs.org/string-argv/-/string-argv-0.3.1.tgz#95e2fbec0427ae19184935f816d74aaa4c5c19da"
@@ -14211,7 +13420,7 @@ string_decoder@0.10, string_decoder@~0.10.x:
resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94"
integrity sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==
-string_decoder@^1.0.0, string_decoder@^1.1.1:
+string_decoder@^1.1.1:
version "1.3.0"
resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e"
integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==
@@ -14304,24 +13513,16 @@ style-loader@^2.0.0:
loader-utils "^2.0.0"
schema-utils "^3.0.0"
+style-mod@^4.0.0, style-mod@^4.1.0:
+ version "4.1.2"
+ resolved "https://registry.npmjs.org/style-mod/-/style-mod-4.1.2.tgz#ca238a1ad4786520f7515a8539d5a63691d7bf67"
+ integrity sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw==
+
styled_string@0.0.1:
version "0.0.1"
resolved "https://registry.npmjs.org/styled_string/-/styled_string-0.0.1.tgz#d22782bd81295459bc4f1df18c4bad8e94dd124a"
integrity sha512-DU2KZiB6VbPkO2tGSqQ9n96ZstUPjW7X4sGO6V2m1myIQluX0p1Ol8BrA/l6/EesqhMqXOIXs3cJNOy1UuU2BA==
-sucrase@^3.32.0:
- version "3.35.0"
- resolved "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz#57f17a3d7e19b36d8995f06679d121be914ae263"
- integrity sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==
- dependencies:
- "@jridgewell/gen-mapping" "^0.3.2"
- commander "^4.0.0"
- glob "^10.3.10"
- lines-and-columns "^1.1.6"
- mz "^2.7.0"
- pirates "^4.0.1"
- ts-interface-checker "^0.1.9"
-
sum-up@^1.0.1:
version "1.0.3"
resolved "https://registry.npmjs.org/sum-up/-/sum-up-1.0.3.tgz#1c661f667057f63bcb7875aa1438bc162525156e"
@@ -14397,6 +13598,11 @@ tabbable@^5.3.3:
resolved "https://registry.npmjs.org/tabbable/-/tabbable-5.3.3.tgz#aac0ff88c73b22d6c3c5a50b1586310006b47fbf"
integrity sha512-QD9qKY3StfbZqWOPLp0++pOrAVb/HbUi5xCc8cUo4XjP19808oaMiDzn0leBY5mCespIBM0CIZePzZjgzR83kA==
+tabbable@^6.2.0:
+ version "6.2.0"
+ resolved "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz#732fb62bc0175cfcec257330be187dcfba1f3b97"
+ integrity sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==
+
table@^6.0.9:
version "6.8.2"
resolved "https://registry.npmjs.org/table/-/table-6.8.2.tgz#c5504ccf201213fa227248bdc8c5569716ac6c58"
@@ -14408,34 +13614,6 @@ table@^6.0.9:
string-width "^4.2.3"
strip-ansi "^6.0.1"
-tailwindcss@^3.1.8:
- version "3.4.4"
- resolved "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.4.tgz#351d932273e6abfa75ce7d226b5bf3a6cb257c05"
- integrity sha512-ZoyXOdJjISB7/BcLTR6SEsLgKtDStYyYZVLsUtWChO4Ps20CBad7lfJKVDiejocV4ME1hLmyY0WJE3hSDcmQ2A==
- dependencies:
- "@alloc/quick-lru" "^5.2.0"
- arg "^5.0.2"
- chokidar "^3.5.3"
- didyoumean "^1.2.2"
- dlv "^1.1.3"
- fast-glob "^3.3.0"
- glob-parent "^6.0.2"
- is-glob "^4.0.3"
- jiti "^1.21.0"
- lilconfig "^2.1.0"
- micromatch "^4.0.5"
- normalize-path "^3.0.0"
- object-hash "^3.0.0"
- picocolors "^1.0.0"
- postcss "^8.4.23"
- postcss-import "^15.1.0"
- postcss-js "^4.0.1"
- postcss-load-config "^4.0.1"
- postcss-nested "^6.0.1"
- postcss-selector-parser "^6.0.11"
- resolve "^1.22.2"
- sucrase "^3.32.0"
-
tap-parser@^7.0.0:
version "7.0.0"
resolved "https://registry.npmjs.org/tap-parser/-/tap-parser-7.0.0.tgz#54db35302fda2c2ccc21954ad3be22b2cba42721"
@@ -14445,7 +13623,7 @@ tap-parser@^7.0.0:
js-yaml "^3.2.7"
minipass "^2.2.0"
-tapable@^1.0.0, tapable@^1.1.3:
+tapable@^1.0.0:
version "1.1.3"
resolved "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz#a1fccc06b58db61fd7a45da2da44f5f3a3e67ba2"
integrity sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==
@@ -14491,21 +13669,6 @@ temp@0.9.4:
mkdirp "^0.5.1"
rimraf "~2.6.2"
-terser-webpack-plugin@^1.4.3:
- version "1.4.5"
- resolved "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.5.tgz#a217aefaea330e734ffacb6120ec1fa312d6040b"
- integrity sha512-04Rfe496lN8EYruwi6oPQkG0vo8C+HT49X687FZnpPF0qMAIHONI6HEXYPKDOE8e5HjXTyKfqRd/agHtH0kOtw==
- dependencies:
- cacache "^12.0.2"
- find-cache-dir "^2.1.0"
- is-wsl "^1.1.0"
- schema-utils "^1.0.0"
- serialize-javascript "^4.0.0"
- source-map "^0.6.1"
- terser "^4.1.2"
- webpack-sources "^1.4.0"
- worker-farm "^1.7.0"
-
terser-webpack-plugin@^5.3.10:
version "5.3.10"
resolved "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz#904f4c9193c6fd2a03f693a2150c62a92f40d199"
@@ -14517,15 +13680,6 @@ terser-webpack-plugin@^5.3.10:
serialize-javascript "^6.0.1"
terser "^5.26.0"
-terser@^4.1.2:
- version "4.8.1"
- resolved "https://registry.npmjs.org/terser/-/terser-4.8.1.tgz#a00e5634562de2239fd404c649051bf6fc21144f"
- integrity sha512-4GnLC0x667eJG0ewJTa6z/yXrbLGv80D9Ru6HIpCQmO+Q4PfEtBFi0ObSckqwL6VyQv/7ENJieXHo2ANmdQwgw==
- dependencies:
- commander "^2.20.0"
- source-map "~0.6.1"
- source-map-support "~0.5.12"
-
terser@^5.26.0, terser@^5.7.0:
version "5.31.1"
resolved "https://registry.npmjs.org/terser/-/terser-5.31.1.tgz#735de3c987dd671e95190e6b98cfe2f07f3cf0d4"
@@ -14595,28 +13749,6 @@ text-table@^0.2.0:
resolved "https://registry.npmjs.org/textextensions/-/textextensions-2.6.0.tgz#d7e4ab13fe54e32e08873be40d51b74229b00fc4"
integrity sha512-49WtAWS+tcsy93dRt6P0P3AMD2m5PvXRhuEA0kaXos5ZLlujtYmpmFsB+QvWUSxE1ZsstmYXfQ7L40+EcQgpAQ==
-thenify-all@^1.0.0:
- version "1.6.0"
- resolved "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz#1a1918d402d8fc3f98fbf234db0bcc8cc10e9726"
- integrity sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==
- dependencies:
- thenify ">= 3.1.0 < 4"
-
-"thenify@>= 3.1.0 < 4":
- version "3.3.1"
- resolved "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz#8932e686a4066038a016dd9e2ca46add9838a95f"
- integrity sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==
- dependencies:
- any-promise "^1.0.0"
-
-through2@^2.0.0:
- version "2.0.5"
- resolved "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz#01c1e39eb31d07cb7d03a96a70823260b23132cd"
- integrity sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==
- dependencies:
- readable-stream "~2.3.6"
- xtend "~4.0.1"
-
through2@^3.0.1:
version "3.0.2"
resolved "https://registry.npmjs.org/through2/-/through2-3.0.2.tgz#99f88931cfc761ec7678b41d5d7336b5b6a07bf4"
@@ -14630,13 +13762,6 @@ through@^2.3.6, through@^2.3.8:
resolved "https://registry.npmjs.org/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5"
integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==
-timers-browserify@^2.0.4:
- version "2.0.12"
- resolved "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz#44a45c11fbf407f34f97bccd1577c652361b00ee"
- integrity sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==
- dependencies:
- setimmediate "^1.0.4"
-
tiny-emitter@^2.0.0:
version "2.1.0"
resolved "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz#1d1a56edfc51c43e863cbb5382a72330e3555423"
@@ -14700,11 +13825,6 @@ tmpl@1.0.5:
resolved "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz#8683e0b902bb9c20c4f726e3c0b69f36518c07cc"
integrity sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==
-to-arraybuffer@^1.0.0:
- version "1.0.1"
- resolved "https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz#7d229b1fcc637e466ca081180836a7aabff83f43"
- integrity sha512-okFlQcoGTi4LQBG/PgSYblw9VOyptsz2KJZqc6qtgGdes8VktzUQkj4BI2blit072iS8VODNcMA+tvnS9dnuMA==
-
to-fast-properties@^1.0.3:
version "1.0.3"
resolved "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-1.0.3.tgz#b83571fa4d8c25b82e231b06e3a3055de4ca1a47"
@@ -14715,13 +13835,6 @@ to-fast-properties@^2.0.0:
resolved "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e"
integrity sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==
-to-object-path@^0.3.0:
- version "0.3.0"
- resolved "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz#297588b7b0e7e0ac08e04e672f85c1f4999e17af"
- integrity sha512-9mWHdnGRuh3onocaHzukyvCZhzvr6tiflAy/JRFXcJX0TjgfWA9pk9t8CMbzmBE4Jfw58pXbkngtBtqYxzNEyg==
- dependencies:
- kind-of "^3.0.2"
-
to-regex-range@^5.0.1:
version "5.0.1"
resolved "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4"
@@ -14729,16 +13842,6 @@ to-regex-range@^5.0.1:
dependencies:
is-number "^7.0.0"
-to-regex@^3.0.1, to-regex@^3.0.2:
- version "3.0.2"
- resolved "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz#13cfdd9b336552f30b51f33a8ae1b42a7a7599ce"
- integrity sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==
- dependencies:
- define-property "^2.0.2"
- extend-shallow "^3.0.2"
- regex-not "^1.0.2"
- safe-regex "^1.1.0"
-
to-vfile@^6.1.0:
version "6.1.0"
resolved "https://registry.npmjs.org/to-vfile/-/to-vfile-6.1.0.tgz#5f7a3f65813c2c4e34ee1f7643a5646344627699"
@@ -14854,11 +13957,6 @@ trough@^1.0.0, trough@^1.0.5:
resolved "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz#b8b639cefad7d0bb2abd37d433ff8293efa5f406"
integrity sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==
-ts-interface-checker@^0.1.9:
- version "0.1.13"
- resolved "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz#784fd3d679722bc103b1b4b8030bcddb5db2a699"
- integrity sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==
-
tslib@^1.9.0:
version "1.14.1"
resolved "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00"
@@ -14869,11 +13967,6 @@ tslib@^2.0.3, tslib@^2.1.0:
resolved "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz#0438f810ad7a9edcde7a241c3d80db693c8cbfe0"
integrity sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==
-tty-browserify@0.0.0:
- version "0.0.0"
- resolved "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.0.tgz#a157ba402da24e9bf957f9aa69d524eed42901a6"
- integrity sha512-JVa5ijo+j/sOoHGjw0sxw734b1LhBkQ3bvUGNdxnVXDCX81Yx7TFgnZygxrIIWn23hbfTaMYLwRmAxFyDuFmIw==
-
type-check@^0.4.0, type-check@~0.4.0:
version "0.4.0"
resolved "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz#07b8203bfa7056c0657050e3ccd2c37730bab8f1"
@@ -14972,11 +14065,6 @@ typedarray.prototype.slice@^1.0.3:
typed-array-buffer "^1.0.2"
typed-array-byte-offset "^1.0.2"
-typedarray@^0.0.6:
- version "0.0.6"
- resolved "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
- integrity sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==
-
typescript-memoize@^1.0.0-alpha.3, typescript-memoize@^1.0.1:
version "1.1.1"
resolved "https://registry.npmjs.org/typescript-memoize/-/typescript-memoize-1.1.1.tgz#02737495d5df6ebf72c07ba0d002e8f4cf5ccfa0"
@@ -15055,30 +14143,6 @@ unified@^9.0.0, unified@^9.2.2:
trough "^1.0.0"
vfile "^4.0.0"
-union-value@^1.0.0:
- version "1.0.1"
- resolved "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz#0b6fe7b835aecda61c6ea4d4f02c14221e109847"
- integrity sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==
- dependencies:
- arr-union "^3.1.0"
- get-value "^2.0.6"
- is-extendable "^0.1.1"
- set-value "^2.0.1"
-
-unique-filename@^1.1.1:
- version "1.1.1"
- resolved "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz#1d69769369ada0583103a1e6ae87681b56573230"
- integrity sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==
- dependencies:
- unique-slug "^2.0.0"
-
-unique-slug@^2.0.0:
- version "2.0.2"
- resolved "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz#baabce91083fc64e945b0f3ad613e264f7cd4e6c"
- integrity sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==
- dependencies:
- imurmurhash "^0.1.4"
-
unique-string@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz#39c6451f81afb2749de2b233e3f7c5e8843bd89d"
@@ -15158,14 +14222,6 @@ unpipe@1.0.0, unpipe@~1.0.0:
resolved "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec"
integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==
-unset-value@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz#8376873f7d2335179ffb1e6fc3a8ed0dfc8ab559"
- integrity sha512-PcA2tsuGSF9cnySLHTLSh2qrQiJ70mn+r+Glzxv2TWZblxsxCC52BDlZoPCsz7STd9pN7EZetkWZBAvk4cgZdQ==
- dependencies:
- has-value "^0.3.1"
- isobject "^3.0.0"
-
untildify@^2.1.0:
version "2.1.0"
resolved "https://registry.npmjs.org/untildify/-/untildify-2.1.0.tgz#17eb2807987f76952e9c0485fc311d06a826a2e0"
@@ -15173,11 +14229,6 @@ untildify@^2.1.0:
dependencies:
os-homedir "^1.0.0"
-upath@^1.1.1:
- version "1.2.0"
- resolved "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz#8f66dbcd55a883acdae4408af8b035a5044c1894"
- integrity sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==
-
update-browserslist-db@^1.0.16:
version "1.1.0"
resolved "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz#7ca61c0d8650766090728046e416a8cde682859e"
@@ -15198,11 +14249,6 @@ uri-js@^4.2.2, uri-js@^4.4.1:
dependencies:
punycode "^2.1.0"
-urix@^0.1.0:
- version "0.1.0"
- resolved "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72"
- integrity sha512-Am1ousAhSLBeB9cG/7k7r2R0zj50uDRlZHPGbazid5s9rlF1F/QKYObEKSIunSjIOkJZqwRRLpvewjEkM7pSqg==
-
url-parse@^1.5.3:
version "1.5.10"
resolved "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz#9d3c2f736c1d75dd3bd2be507dcc111f1e2ea9c1"
@@ -15211,19 +14257,6 @@ url-parse@^1.5.3:
querystringify "^2.1.1"
requires-port "^1.0.0"
-url@^0.11.0:
- version "0.11.3"
- resolved "https://registry.npmjs.org/url/-/url-0.11.3.tgz#6f495f4b935de40ce4a0a52faee8954244f3d3ad"
- integrity sha512-6hxOLGfZASQK/cijlZnZJTq8OXAkt/3YGfQX45vvMYXpZoo8NdWZcY73K108Jf759lS1Bv/8wXnHDTSz17dSRw==
- dependencies:
- punycode "^1.4.1"
- qs "^6.11.2"
-
-use@^3.1.0:
- version "3.1.1"
- resolved "https://registry.npmjs.org/use/-/use-3.1.1.tgz#d50c8cac79a19fbc20f2911f56eb973f4e10070f"
- integrity sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==
-
username-sync@^1.0.2:
version "1.0.3"
resolved "https://registry.npmjs.org/username-sync/-/username-sync-1.0.3.tgz#ae41c5c8a4c8c2ecc1443a7d0742742bd7e36732"
@@ -15239,20 +14272,6 @@ util-extend@^1.0.1:
resolved "https://registry.npmjs.org/util-extend/-/util-extend-1.0.3.tgz#a7c216d267545169637b3b6edc6ca9119e2ff93f"
integrity sha512-mLs5zAK+ctllYBj+iAQvlDCwoxU/WDOUaJkcFudeiAX6OajC6BKXJUa9a+tbtkC11dz2Ufb7h0lyvIOVn4LADA==
-util@^0.10.4:
- version "0.10.4"
- resolved "https://registry.npmjs.org/util/-/util-0.10.4.tgz#3aa0125bfe668a4672de58857d3ace27ecb76901"
- integrity sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==
- dependencies:
- inherits "2.0.3"
-
-util@^0.11.0:
- version "0.11.1"
- resolved "https://registry.npmjs.org/util/-/util-0.11.1.tgz#3236733720ec64bb27f6e26f421aaa2e1b588d61"
- integrity sha512-HShAsny+zS2TZfaXxD9tYj4HQGlBezXZMZuM/S5PKLLoZkShZiGk9o5CzukI1LVHZvjdvZ2Sj1aW/Ndn2NB/HQ==
- dependencies:
- inherits "2.0.3"
-
utils-merge@1.0.1:
version "1.0.1"
resolved "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713"
@@ -15319,11 +14338,6 @@ vfile@^4.0.0:
unist-util-stringify-position "^2.0.0"
vfile-message "^2.0.0"
-vm-browserify@^1.0.1:
- version "1.1.2"
- resolved "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz#78641c488b8e6ca91a75f511e7a3b32a86e5dda0"
- integrity sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==
-
w3c-hr-time@^1.0.2:
version "1.0.2"
resolved "https://registry.npmjs.org/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz#0a89cdf5cc15822df9c360543676963e0cc308cd"
@@ -15331,6 +14345,11 @@ w3c-hr-time@^1.0.2:
dependencies:
browser-process-hrtime "^1.0.0"
+w3c-keyname@^2.2.4:
+ version "2.2.8"
+ resolved "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz#7b17c8c6883d4e8b86ac8aba79d39e880f8869c5"
+ integrity sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==
+
w3c-xmlserializer@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-2.0.0.tgz#3e7104a05b75146cc60f564380b7f683acf1020a"
@@ -15399,24 +14418,6 @@ watch-detector@^1.0.0:
silent-error "^1.1.1"
tmp "^0.1.0"
-watchpack-chokidar2@^2.0.1:
- version "2.0.1"
- resolved "https://registry.npmjs.org/watchpack-chokidar2/-/watchpack-chokidar2-2.0.1.tgz#38500072ee6ece66f3769936950ea1771be1c957"
- integrity sha512-nCFfBIPKr5Sh61s4LPpy1Wtfi0HE8isJ3d2Yb5/Ppw2P2B/3eVSEBjKfN0fmHJSK14+31KwMKmcrzs2GM4P0Ww==
- dependencies:
- chokidar "^2.1.8"
-
-watchpack@^1.7.4:
- version "1.7.5"
- resolved "https://registry.npmjs.org/watchpack/-/watchpack-1.7.5.tgz#1267e6c55e0b9b5be44c2023aed5437a2c26c453"
- integrity sha512-9P3MWk6SrKjHsGkLT2KHXdQ/9SNkyoJbabxnKOoJepsvJjJG8uYTR3yTPxPQvNDI3w4Nz1xnE0TLHK4RIVe/MQ==
- dependencies:
- graceful-fs "^4.1.2"
- neo-async "^2.5.0"
- optionalDependencies:
- chokidar "^3.4.1"
- watchpack-chokidar2 "^2.0.1"
-
watchpack@^2.4.1:
version "2.4.1"
resolved "https://registry.npmjs.org/watchpack/-/watchpack-2.4.1.tgz#29308f2cac150fa8e4c92f90e0ec954a9fed7fff"
@@ -15454,54 +14455,16 @@ webidl-conversions@^6.1.0:
resolved "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-6.1.0.tgz#9111b4d7ea80acd40f5270d666621afa78b69514"
integrity sha512-qBIvFLGiBpLjfwmYAaHPXsn+ho5xZnGvyGvsarywGNc8VyQJUMHJ8OBKGGrPER0okBeMDaan4mNBlgBROxuI8w==
-webpack-sources@^1.4.0, webpack-sources@^1.4.1:
- version "1.4.3"
- resolved "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz#eedd8ec0b928fbf1cbfe994e22d2d890f330a933"
- integrity sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==
- dependencies:
- source-list-map "^2.0.0"
- source-map "~0.6.1"
-
webpack-sources@^3.2.3:
version "3.2.3"
resolved "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz#2d4daab8451fd4b240cc27055ff6a0c2ccea0cde"
integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==
-webpack@^4.43.0:
- version "4.47.0"
- resolved "https://registry.npmjs.org/webpack/-/webpack-4.47.0.tgz#8b8a02152d7076aeb03b61b47dad2eeed9810ebc"
- integrity sha512-td7fYwgLSrky3fI1EuU5cneU4+pbH6GgOfuKNS1tNPcfdGinGELAqsb/BP4nnvZyKSG2i/xFGU7+n2PvZA8HJQ==
- dependencies:
- "@webassemblyjs/ast" "1.9.0"
- "@webassemblyjs/helper-module-context" "1.9.0"
- "@webassemblyjs/wasm-edit" "1.9.0"
- "@webassemblyjs/wasm-parser" "1.9.0"
- acorn "^6.4.1"
- ajv "^6.10.2"
- ajv-keywords "^3.4.1"
- chrome-trace-event "^1.0.2"
- enhanced-resolve "^4.5.0"
- eslint-scope "^4.0.3"
- json-parse-better-errors "^1.0.2"
- loader-runner "^2.4.0"
- loader-utils "^1.2.3"
- memory-fs "^0.4.1"
- micromatch "^3.1.10"
- mkdirp "^0.5.3"
- neo-async "^2.6.1"
- node-libs-browser "^2.2.1"
- schema-utils "^1.0.0"
- tapable "^1.1.3"
- terser-webpack-plugin "^1.4.3"
- watchpack "^1.7.4"
- webpack-sources "^1.4.1"
-
-webpack@^5.74.0:
- version "5.92.1"
- resolved "https://registry.npmjs.org/webpack/-/webpack-5.92.1.tgz#eca5c1725b9e189cffbd86e8b6c3c7400efc5788"
- integrity sha512-JECQ7IwJb+7fgUFBlrJzbyu3GEuNBcdqr1LD7IbSzwkSmIevTm8PF+wej3Oxuz/JFBUZ6O1o43zsPkwm1C4TmA==
- dependencies:
- "@types/eslint-scope" "^3.7.3"
+webpack@5.94.0, webpack@^4.43.0, webpack@^5.74.0:
+ version "5.94.0"
+ resolved "https://registry.npmjs.org/webpack/-/webpack-5.94.0.tgz#77a6089c716e7ab90c1c67574a28da518a20970f"
+ integrity sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg==
+ dependencies:
"@types/estree" "^1.0.5"
"@webassemblyjs/ast" "^1.12.1"
"@webassemblyjs/wasm-edit" "^1.12.1"
@@ -15510,7 +14473,7 @@ webpack@^5.74.0:
acorn-import-attributes "^1.9.5"
browserslist "^4.21.10"
chrome-trace-event "^1.0.2"
- enhanced-resolve "^5.17.0"
+ enhanced-resolve "^5.17.1"
es-module-lexer "^1.2.1"
eslint-scope "5.1.1"
events "^3.2.0"
@@ -15642,13 +14605,6 @@ wordwrap@^1.0.0:
resolved "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb"
integrity sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==
-worker-farm@^1.7.0:
- version "1.7.0"
- resolved "https://registry.npmjs.org/worker-farm/-/worker-farm-1.7.0.tgz#26a94c5391bbca926152002f69b84a4bf772e5a8"
- integrity sha512-rvw3QTZc8lAxyVrqcSGVm5yP/IJ2UcB3U0graE3LCFoZ0Yn2x4EoVSqJKdB/T5M+FLcRPjz4TDacRf3OCfNUzw==
- dependencies:
- errno "~0.1.7"
-
workerpool@^2.3.0:
version "2.3.4"
resolved "https://registry.npmjs.org/workerpool/-/workerpool-2.3.4.tgz#661335ded59a08c01ca009e30cc96929a7b4b0aa"
@@ -15665,7 +14621,7 @@ workerpool@^3.1.1:
object-assign "4.1.1"
rsvp "^4.8.4"
-workerpool@^6.0.2, workerpool@^6.1.4, workerpool@^6.1.5:
+workerpool@^6.1.4, workerpool@^6.1.5:
version "6.5.1"
resolved "https://registry.npmjs.org/workerpool/-/workerpool-6.5.1.tgz#060f73b39d0caf97c6db64da004cd01b4c099544"
integrity sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA==
@@ -15743,16 +14699,11 @@ xmlhttprequest-ssl@^1.6.3:
resolved "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.6.3.tgz#03b713873b01659dfa2c1c5d056065b27ddc2de6"
integrity sha512-3XfeQE/wNkvrIktn2Kf0869fC0BN6UpydVasGIeSm2B1Llihf7/0UfZM+eCkOw3P7bP4+qPgqhm7ZoxuJtFU0Q==
-xtend@^4.0.0, xtend@~4.0.1:
+xtend@^4.0.0:
version "4.0.2"
resolved "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54"
integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==
-y18n@^4.0.0:
- version "4.0.3"
- resolved "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz#b5f259c82cd6e336921efd7bfd8bf560de9eeedf"
- integrity sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==
-
y18n@^5.0.5:
version "5.0.8"
resolved "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55"
@@ -15786,11 +14737,6 @@ yaml@^1.10.0, yaml@^1.9.2:
resolved "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b"
integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==
-yaml@^2.3.4:
- version "2.4.5"
- resolved "https://registry.npmjs.org/yaml/-/yaml-2.4.5.tgz#60630b206dd6d84df97003d33fc1ddf6296cca5e"
- integrity sha512-aBx2bnqDzVOyNKfsysjA2ms5ZlnjSAW2eG3/L5G/CSujfjLJTJsEw1bGw8kCf04KodQWk1pxlGnZ56CRxiawmg==
-
yargs-parser@^20.2.2:
version "20.2.9"
resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz#2eb7dc3b0289718fc295f362753845c41a0c94ee"
diff --git a/version/VERSION b/version/VERSION
index b148ac3829b7..0b996293dc4c 100644
--- a/version/VERSION
+++ b/version/VERSION
@@ -1 +1 @@
-1.21.0-dev
\ No newline at end of file
+1.21.4-dev
\ No newline at end of file
diff --git a/website/.gitignore b/website/.gitignore
index 7d809dab7e15..c16b381f857c 100644
--- a/website/.gitignore
+++ b/website/.gitignore
@@ -8,3 +8,4 @@ out
.env*.local
website-preview
+.vercel
diff --git a/website/content/README.md b/website/content/README.md
new file mode 100644
index 000000000000..21f6fcf83af4
--- /dev/null
+++ b/website/content/README.md
@@ -0,0 +1,607 @@
+# Information architecture and content strategy for Consul documentation
+
+The `website/content` directory in the `hashicorp/consul` repository contains [the Consul documentation on developer.hashicorp.com](https://developer.hashicorp.com/consul). This `README` describes the directory structure and design principles for this documentation set.
+
+`README` table of contents:
+
+- [Content directory overview](#content-directory-overview)
+- [North star principles for content design](#north-star-principles)
+- [Consul content strategy](#content-strategy), including user persona and jobs-to-be-done
+- [Consul taxonomy](#taxonomy)
+- [Path syntax](#path-syntax) for directory name and nesting guidelines
+- [Controlled vocabularies](#controlled-vocabularies) for Consul terms and labeling standards
+- [Guide to partials](#guide-to-partials)
+- [How to document new Consul features](#how-to-document-new-consul-features)
+- [Maintaining and deprecating content](#maintaining-and-deprecating-content)
+
+To update the contents of this document, create a PR against the `main` branch of the `hashicorp/consul` GitHub repository. Apply the label `type/docs` to the PR to request review from an approver in the `consul-docs` group.
+
+## Content directory overview
+
+The `website/content` directory in the `hashicorp/consul` GitHub repo contains the following sub-directories:
+
+```
+.
+├── api-docs
+├── commands
+├── docs
+└── partials
+```
+
+After you merge a PR into a numbered release branch, changes to these folders appear at the following URLs:
+
+- Changes to `api-docs` appear at [https://developer.hashicorp.com/consul/api-docs](https://developer.hashicorp.com/consul/api-docs).
+- Changes to `commands` appear at [https://developer.hashicorp.com/consul/commands](https://developer.hashicorp.com/consul/commands).
+- Changes to `docs` appear at [https://developer.hashicorp.com/consul/docs](https://developer.hashicorp.com/consul/docs).
+
+URLs follow the directory structure for each file and omit the `.mdx` file extension. Pages named `index.mdx` adopt their directory's name. For example, the file `docs/reference/agent/configuration-file/index.mdx` appears at the URL [https://developer.hashicorp.com/consul/docs/reference/agent/configuration-file](https://developer.hashicorp.com/consul/docs/reference/agent/configuration-file).
+
+The `partials` folder includes content that you can reuse across pages in any of the three folders. Refer to [Guide to Partials](#guide-to-partials) for more information.
+
+Tutorials that appear at [https://developer.hashicorp.com/consul/tutorials](https://developer.hashicorp.com/consul/tutorials) are located in a different repository. This content exists in the [hashicorp/tutorials GitHub repo](https://github.com/hashicorp/tutorials), which is internal to the HashiCorp organization.
+
+### Other directories of note
+
+The `website/data` directory contains `.json` files that populate the navigation sidebar on [developer.hashicorp.com](https://developer.hashicorp.com).
+
+The `website/public/img` directory contains the images used in the documentation.
+
+Instructions on editing these files, including instructions on running local builds of the documentation, are in the `README` for the `website` directory, one level above this one.
+
+## North Star principles
+
+The design of the content in the `docs/` directory, including structure, file paths, and labels, is governed by the following _north star principles_.
+
+1. **Users are humans**. Design for humans first. For example, file paths become URLs; create human-readable descriptions of the content and avoid unnecessary repetition.
+1. **Less is always more**. Prefer single words for folder and file names; add a hyphen and a second word to disambiguate from existing content.
+1. **Document what currently exists**. Do not create speculative folders and files to "reserve space" for future updates and releases. Do not describe Consul as it will exist in the future; describe it as it exists right now, in the latest release.
+1. **Beauty works better**. When creating new files and directories, strive for consistency with the existing structure. For example, use parallel structures across directories and flatten directories that run too deep. Tip: If it doesn't look right, it's probably not right.
+1. **Prefer partials over `ctrl+v`**. Spread content out, but document unique information in one place. When you need to repeat content across multiple pages, use partials to maintain content.
+
+These principles exist to help you navigate ambiguity when making changes to the underlying content. If you add new content and you're not quite sure where to place it or how to name it, use these "north stars" to help you make an informed decision about what to do.
+
+Over time, Consul may change in ways that require significant edits to this information architecture. The IA and content strategy were designed with this possibility in mind. Use these north star principles to help you make informed (and preferably incremental) changes over time.
+
+## Content strategy
+
+Consul's content strategy centers on three main considerations:
+
+- **User persona** considers the roles of Consul users in DevOps workflows, which may be either broad or narrowly defined.
+- **Jobs-to-be-done** includes the practical outcomes users want to achieve when using Consul to address a latent concern.
+- **Content type** asks what kind of content exists on the page, and follows the principles of Diataxis.
+
+You should keep all three of the considerations in mind when creating new content or updating existing content in the documentation and tutorials. Ask yourself the following questions to help you determine your content needs:
+
+- Who will use this documentation?
+- What concerns will that person have?
+- What goals are they trying to accomplish?
+- What kind of content would help the user achieve their goal?
+
+For more information about recommended workflow patterns, refer to [How to document new Consul features](#how-to-document-new-consul-features) and [Maintaining and deprecating content](#maintaining-and-deprecating-content).
+
+### User personas, jobs-to-be-done, and critical user journeys
+
+Consul is a flexible service networking tool, with applications across DevOps workflows. The following table lists Consul's user personas, examples of their major concerns, and typical jobs that this user wants to complete using Consul's features.
+
+| User persona | Jobs-to-be-done | Critical user journeys |
+| :-------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Application developer | • Application is discoverable across on-prem and cloud environments. • Applications can reliably find and connect to dependent upstream services. | • I want to use Consul to register the locations of my applications across infrastructure environments so that they can be discovered by downstream applications. • I want to define intelligent failover policies for my applications so that my services are highly available and fault tolerant. • I want to use Consul's service catalog to find and connect to healthy upstream services across multiple clouds and runtime environments. |
+| Platform engineer | • Architect global service registry that makes services available regardless of infrastructure. • Reliability and availability of the service registry so that I can meet service level objectives. | • I want to implement monitoring and alerting for Consul in order to quickly identify and resolve cluster instability, or service connectivity issues. • I want to automate recovery procedures and ensure resilience in order to meet SLO and SLA objectives. • I want to interconnect Consul clusters to enable unified service discovery across cloud and on-premises environments. • I want to provide guidance and guardrails for enabling application developers to register services into Consul, and ensure their services are highly available. |
+| Security engineer | • Ensure that critical infrastructure and services adhere to corporate security policies. | • I want to ensure that communication between critical infrastructure services is encrypted in transit, and access to systems is appropriately controlled and audited. • I want to integrate Consul authentication with existing corporate identity and access management (IAM) systems. • I want to ensure that users have least privileged, but sufficient access to Consul for managing and discovering services. |
+
+### Content types
+
+The content we create and host on developer.hashicorp.com follows the principles of the [Diátaxis method for structured documentation](https://diataxis.fr/), which use the following basic content types:
+
+- Explanation
+- Usage
+- Reference
+- Tutorials
+
+Because tutorials are hosted in a separate repository, this README focuses on the first three content types.
+
+Within the "Explanation" category, we use three different types of pages, each of which has a distinct purpose.
+
+- **Index** pages provide lists of links to supporting documentation on a subject. [Example: Deploy Consul](https://developer.hashicorp.com/consul/docs/deploy)
+- **Overview** pages provide an introduction to a subject and serve as a central information point. [Example: Expand service network east/west](https://developer.hashicorp.com/consul/docs/east-west)
+- **Concept** pages provide discursive explanations of Consul's underlying systems and their operations. [Example: Consul catalog](https://developer.hashicorp.com/consul/docs/concept/catalog)
+
+## Taxonomy
+
+There are three main categories in the Consul docs information architecture. This division of categories is _not literal_ to the directory structure, even though the **Reference** category includes the repository's `reference` folder.
+
+- Intro
+- Usage
+- Reference
+
+These categories intentionally align with [Diataxis](https://diataxis.fr/).
+
+The following diagram summarizes the taxonomy of the Consul documentation:
+
+
+
+In this image, the blue boxes represent literal directories. The salmon and purple boxes around them are figurative categories that are not literally represented in the file structure.
+
+### Intro
+
+The **Intro** category includes the following folders in `website/content/docs/`:
+
+- `architecture`
+- `concept`
+- `enterprise`
+- `fundamentals`
+- `use-case`
+
+The following table lists each term and a definition to help you decide where to place new content.
+
+| Term | Directory | What it includes |
+| :----------- | :------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Architecture | `architecture` | The product's components and their “maps” in cloud networking contexts. |
+| Concepts | `concept` | Describes the complex behavior of technical systems in a non-literal manner. For example, computers do not literally “gossip” when they use the gossip protocol. |
+| Enterprise | `enterprise` | Consul Enterprise license offerings and how to implement them. |
+| Fundamentals | `fundamentals` | The knowledge, connection and authorization methods, interactions, configurations, and operations most users require to use the product. |
+| Use cases | `use-case` | The highest level goals practitioners have; a product function that “solves” enterprise concerns and usually competes with other products. |
+
+#### User persona indexed to intro topic
+
+This table indexes each intro directory and its contents with the typical concerns of the user persona based on their jobs-to-be-done and critical user journeys:
+
+| Intro topic | Platform engineer | Security engineer | Application developer |
+| :----------- | :---------------: | :---------------: | :-------------------: |
+| Architecture | ✅ | ✅ | ❌ |
+| Concepts | ✅ | ✅ | ✅ |
+| Enterprise | ✅ | ❌ | ❌ |
+| Fundamentals | ✅ | ✅ | ✅ |
+| Use cases | ✅ | ✅ | ✅ |
+
+The purpose of this table is to validate the relationship between the information architecture and the content strategy by indexing them to one another. Potential applications for this table include curricular learning paths and targeted content expansion.
+
+### Usage
+
+The **Usage** category includes the following folders in `website/content/docs/`:
+
+- `automate`
+- `connect`
+- `deploy`
+- `discover`
+- `east-west`
+- `envoy-extension`
+- `integrate`
+- `manage`
+- `manage-traffic`
+- `monitor`
+- `multi-tenant`
+- `north-south`
+- `observe`
+- `register`
+- `release-notes`
+- `secure`
+- `secure-mesh`
+- `upgrade`
+
+These folders are organized into two groups that are _not literal_ to the directory structure, but are reflected in the navigation bar.
+
+- **Operations**. User actions, workflows, and goals related to installing and operating Consul as a long-running daemon on multiple nodes in a network.
+- **Service networking**. User actions, workflows, and goals related to networking solutions for application workloads.
+
+Each folder is named after a corresponding _phase_, which have a set order in the group.
+
+#### Operations
+
+Operations consists of the following phases, intentionally ordered to reflect the full lifecycle of a Consul agent.
+
+| Phase | Directory | Description |
+| :------------------- | :-------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Deploy Consul | `deploy` | The processes to install and start Consul server agents, client agents, and dataplanes. |
+| Secure Consul | `secure` | The processes to set up and maintain secure communications with Consul agents, including ACLs, TLS, and gossip. |
+| Manage multi-tenancy | `multi-tenant` | The processes to use one Consul datacenter for multiple tenants, including admin partitions, namespaces, network segments, and sameness groups. |
+| Manage Consul | `manage` | The processes to manage and customize Consul's behavior, including DNS forwarding on nodes, server disaster recovery, rate limiting, and scaling options. |
+| Monitor Consul | `monitor` | The processes to export Consul logs and telemetry for insight into agent behavior. |
+| Upgrade Consul | `upgrade` | The processes to update the Consul version running in datacenters. |
+| Release Notes | `release-notes` | Describes new, changed, and deprecated features for each release of Consul and major associated binaries. |
+
+#### Service networking
+
+Service Networking consists of the following phases, intentionally ordered to reflect a recommended “order of operations.” Although these phases do not need to be completed in order, their order reflects a general path for increasing complexity in Consul’s service networking capabilities as you develop your network.
+
+| Phase | Directory | Description |
+| :--------------------- | :--------------- | :--------------------------------------------------------------------------------------------------------------------------------------- |
+| Register services | `register` | How to define services and health checks and then register them with Consul. |
+| Discover services | `discover` | How to use Consul's service discovery features, including Consul DNS, service lookups, load balancing. |
+| Connect service mesh | `connect` | How to set up and use sidecar proxies in a Consul service mesh. |
+| Secure north/south | `north-south` | How to configure, deploy, and use the Consul API gateway to secure network ingress. |
+| Expand east/west | `east-west` | How to connect Consul datacenters across regions, runtimes, and providers with WAN federation and cluster peering. |
+| Secure mesh traffic | `secure-mesh` | How to secure service-to-service communication with service intentions and TLS certificates. |
+| Manage service traffic | `manage-traffic` | How to route traffic between services in a service mesh, including service failover and progressive rollouts. |
+| Observe service mesh | `observe` | How to observe service mesh telemetry and application performance, including Grafana. |
+| Automate applications | `automate` | How to automate Consul and applications to update dynamically, including the KV store, Consul-Terraform-Sync (CTS), and Consul template. |
+
+#### User persona indexed to usage phase
+
+This table indexes each usage directory and its contents with the typical concerns of the user persona based on their jobs-to-be-done and critical user journeys:
+
+| Usage phase | Platform engineer | Security engineer | Application developer |
+| :--------------------- | :---------------: | :---------------: | :-------------------: |
+| Deploy Consul | ✅ | ✅ | ❌ |
+| Secure Consul | ✅ | ✅ | ❌ |
+| Manage multi-tenancy | ✅ | ✅ | ❌ |
+| Manage Consul | ✅ | ❌ | ❌ |
+| Monitor Consul | ✅ | ✅ | ❌ |
+| Upgrade Consul | ✅ | ❌ | ❌ |
+| Release Notes | ✅ | ❌ | ❌ |
+| Register services | ✅ | ❌ | ✅ |
+| Discover services | ✅ | ❌ | ✅ |
+| Connect service mesh | ✅ | ❌ | ❌ |
+| Secure north/south | ✅ | ✅ | ❌ |
+| Expand east/west | ✅ | ✅ | ❌ |
+| Secure mesh traffic | ✅ | ✅ | ❌ |
+| Manage service traffic | ✅ | ❌ | ❌ |
+| Observe service mesh | ✅ | ❌ | ❌ |
+| Automate applications | ✅ | ❌ | ✅ |
+
+The purpose of this table is to validate the relationship between the information architecture and the content strategy by indexing them to one another. Potential applications for this table include curricular learning paths and targeted content expansion.
+
+### Reference
+
+The **Reference** category includes the following folders in `website/content/docs/`:
+
+- `error-messages`
+- `reference`
+- `troubleshoot`
+
+The following table lists each term and a definition to help you decide where to place new content.
+
+| Term | Directory | What it includes |
+| :------------- | :---------------- | :--------------------------------------------------------------------------------------------------- |
+| Error Messages | `error-messages` | Error messages and their causes, organized by runtime and Consul release binary. |
+| Reference | `reference` | All reference information for configuring Consul, its components, and the infrastructure it runs on. |
+| Troubleshoot | `troubleshoot` | Instructions and guidance about how to figure out what's wrong with a Consul deployment. |
+
+### User persona indexed to reference subject
+
+This table indexes each reference and its contents with the typical concerns of the user persona based on their jobs-to-be-done and critical user journeys.
+
+| Reference subject | Platform engineer | Security engineer | Application developer |
+| :----------------------- | :---------------: | :---------------: | :-------------------: |
+| Error messages | ✅ | ❌ | ❌ |
+| Reference specifications | ✅ | ✅ | ✅ |
+| Troubleshoot | ✅ | ❌ | ✅ |
+
+The purpose of this table is to validate the relationship between the information architecture and the content strategy by indexing them to one another. Potential applications for this table include curricular learning paths and targeted content expansion.
+
+## Path syntax
+
+A major advantage of this information architecture is the filepath structure. This structure "tags" documentation with keywords that describe the page's content to optimize the documentation for Google SEO while also helping human users build a "mental model" of Consul.
+
+Our syntax creates human-readable names for file paths using a controlled vocabulary and intentional permutations. In general, the syntax follows a repeating pattern of `Verb / Noun / Adjective` to describe increasingly specific content and user goals.
+
+For **Consul operations**, filepaths assume the following syntax:
+
+
+
+```plaintext
+Phase / Component / Runtime / Action / Provider
+```
+
+
+
+Examples:
+
+- `secure/encryption/tls/rotate/vm` contains instructions for rotating TLS certificates Consul agents use to secure their communications when running on virtual machines.
+- `deploy/server/k8s/platform/openshift` contains instructions on deploying a Consul server agent when running OpenShift.
+- `upgrade/k8s/compatibility` contains information about compatible software versions to help you upgrade the version of Consul running on Kubernetes.
+
+For **service networking**, filepaths tend to have the following order:
+
+
+
+```plaintext
+Phase / Feature / Action / Runtime / Interface
+```
+
+
+
+Examples:
+
+- `discover/load-balancer/nginx` contains instructions for using NGINX as a load balancer based on Consul service discovery results.
+- `east-west/cluster-peering/establish/k8s` contains instructions for creating new connections between Consul clusters running on Kubernetes in different regions or cloud providers.
+- `register/service/k8s/external` contains information about how to register services running on external nodes to Consul on Kubernetes by configuring them to join the Consul datacenter.
+- `register/external/esm/k8s` contains information about registering services running on external nodes to Consul on Kubernetes using the External Services Manager (ESM).
+
+## Controlled vocabularies
+
+This section lists the standard names for files and directories, divided into sub-groups based on the syntax guide in this `README`. The following list provides links to specific vocabulary groups:
+
+- [Architecture vocabulary](#architecture-vocabulary)
+- [Concepts vocabulary](#concepts-vocabulary)
+- [Use case vocabulary](#use-case-vocabulary)
+- [Components vocabulary](#components-vocabulary)
+- [Features vocabulary](#features-vocabulary)
+- [Runtimes vocabulary](#runtimes-vocabulary)
+- [Actions vocabulary](#actions-vocabulary)
+- [Providers vocabulary](#providers-vocabulary)
+- [Interfaces vocabulary](#interfaces-vocabulary)
+- [Configuration entry vocabulary](#configuration-entry-vocabulary)
+- [Envoy extension vocabulary](#envoy-extension-vocabulary)
+
+### Architecture vocabulary
+
+Consul's _architecture_ vocabulary is structured according to where components run:
+
+- `control-plane`: The _control plane_ is the network infrastructure that maintains a central registry to track services and their respective IP addresses. Both server and client agents operate as part of the control plane. Consul dataplanes, despite the name, are also part of the Consul control plane.
+- `data-plane`: Use two words, _data plane_, to refer to the application layer and components involved in service-to-service communication.
+
+Common architecture terms and where they run:
+
+| Control plane | Data plane |
+| :------------- | :--------- |
+| `agent` | `gateway` |
+| `server agent` | `mesh` |
+| `client agent` | `proxy` |
+| `dataplane` | `service` |
+
+The **Reference** category also includes an `architecture` sub-directory. This "Reference architecture" includes information such as port requirements, server requirements, and AWS ECS architecture.
+
+### Concepts vocabulary
+
+Consul's _concepts_ vocabulary collects terms that describe how internal systems operate through human actions.
+
+| Concept | Label | Description |
+| :-------------------------- | :------------ | :------------------------------------------------------------------------------------------------ |
+| Consul catalog | `catalog` | Covers Consul's running service registry, which includes node addresses and health check results. |
+| Consensus protocol (Raft) | `consensus` | Covers the server agent elections governed by the Raft protocol. |
+| Cluster consistency | `consistency` | Covers Consul's anti-entropy features, consistency modes, and Jepsen testing. |
+| Gossip communication (Serf) | `gossip` | Covers Serf communication between Consul agents in a datacenter. |
+| Datacenter reliability | `reliability` | Covers fault tolerance, quorum size, and server redundancy. |
+
+### Use case vocabulary
+
+Consul's _use case_ vocabulary collects terms that describe the highest-level goals users have that would lead them to choose Consul as their networking solution.
+
+| Use case | Label |
+| :-------------------------------- | :------------------ |
+| Service discovery | `service-discovery` |
+| Service mesh | `service-mesh` |
+| API gateway security | `api-gateway` |
+| Configuration management tooling | `config-management` |
+| Domain Name Service (DNS) tooling | `dns` |
+
+### Components vocabulary
+
+Consul's _components_ vocabulary collects terms that describe Consul's built-in components, enterprise offerings, and other offerings that impact the operations of Consul agent clusters.
+
+| Component | Label |
+| :--------------------------- | :------------------ |
+| Access Control List (ACL) | `acl` |
+| Admin partition | `admin-partition` |
+| Audit logs | `audit-log` |
+| Automated backups | `automated-backup` |
+| Automated upgrades | `automated-upgrade` |
+| Auth methods | `auth-method` |
+| Cloud auto-join | `cloud-auto-join` |
+| Consul-Terraform-Sync | `cts` |
+| DNS | `dns` |
+| FIPS | `fips` |
+| JSON Web Token Authorization | `jwt-auth` |
+| Consul Enterprise License | `license` |
+| Long term support (LTS) | `lts` |
+| Namespaces | `namespace` |
+| Network areas | `network-area` |
+| Network segments | `network-segment` |
+| OIDC Authorization | `oidc-auth` |
+| Agent rate limits | `rate-limit` |
+| Read replicas | `read-replica` |
+| Redundancy zones | `redundancy-zone` |
+| Sentinel policies | `sentinel` |
+| Agent snapshots | `snapshot` |
+| Single sign on (SSO) | `sso` |
+
+### Features vocabulary
+
+Consul's _features_ vocabulary collects terms that describe Consul product offerings related to service networking for application workloads.
+
+| Feature | Label |
+| :------------------------------------------------ | :--------------------- |
+| Cluster peering | `cluster-peering` |
+| Consul template | `consul-template` |
+| Consul DNS | `dns` |
+| Discovery chain | `discovery-chain` |
+| Distributed tracing | `distributed-tracing` |
+| External services manager (ESM) | `esm` |
+| Failover | `failover` |
+| Health checks | `health-check` |
+| Service intentions | `intention` |
+| Ingress gateway (deprecated) | `ingress-gateway` |
+| Key/value store | `kv` |
+| Load balancing | `load-balancer` |
+| Logs | `log` |
+| Mesh gateways | `mesh-gateway` |
+| Mutual Transport Layer Security (mTLS) encryption | `mtls` |
+| Prepared queries (dynamic service lookup) | `dynamic` |
+| Progressive application rollouts | `progressive-rollouts` |
+| Services | `service` |
+| Sessions | `session` |
+| Static DNS queries (static service lookup) | `static` |
+| Service mesh telemetry | `telemetry` |
+| Transparent proxy | `transparent-proxy` |
+| Virtual services | `virtual-service` |
+| Consul DNS views | `views` |
+| Wide area network (WAN) federation | `wan-federation` |
+| Watches | `watch` |
+
+### Runtimes vocabulary
+
+Consul's _runtimes_ vocabulary collects the underlying runtimes where Consul supports operations.
+
+| Runtimes | Label |
+| :----------------------- | :------- |
+| Virtual machines (VMs) | `vm` |
+| Kubernetes | `k8s` |
+| Nomad | `nomad` |
+| Docker | `docker` |
+| HashiCorp Cloud Platform | `hcp` |
+
+#### Provider-specific runtimes
+
+This sub-group includes provider-specific runtimes, such as EKS and AKS.
+
+| Provider-specific runtimes | Label |
+| :----------------------------------- | :---------- |
+| AWS Elastic Container Service (ECS) | `ecs` |
+| AWS Elastic Kubernetes Service (EKS) | `eks` |
+| AWS Lambda (serverless) | `lambda` |
+| Azure Kubernetes Service (AKS) | `aks` |
+| Google Kubernetes Service (GKS) | `gks` |
+| OpenShift | `openshift` |
+| Argo | `argo` |
+
+### Actions vocabulary
+
+Consul's _actions_ vocabulary collects the actions users take to operate Consul and enact service networking states.
+
+| Action | Label |
+| :------------------------- | :--------------- |
+| Backup and restore | `backup-restore` |
+| Bootstrap | `bootstrap` |
+| Configure | `configure` |
+| Deploy | `deploy` |
+| Enable | `enable` |
+| Encrypt | `encrypt` |
+| Forward | `forwarding` |
+| Initialize a system | `initialize` |
+| Install a software package | `install` |
+| Deploy a listener | `listener` |
+| Manually take an action | `manual` |
+| Migrate | `migrate` |
+| Create a module | `module` |
+| Monitor | `monitor` |
+| Peer | `peer` |
+| Render | `render` |
+| Requirements | `requirements` |
+| Reroute traffic | `reroute` |
+| Rotate a certificate | `rotate` |
+| Route traffic | `route` |
+| Run | `run` |
+| Source | `source` |
+| Store | `store` |
+| Technical specifications | `tech-specs` |
+
+### Providers vocabulary
+
+Consul's _providers_ vocabulary collects the cloud providers and server locations that Consul runs on.
+
+| Provider | Label |
+| :-------------------------- | :--------- |
+| Amazon Web Services (AWS) | `aws` |
+| Microsoft Azure | `azure` |
+| Google Cloud Platform (GCP) | `gcp` |
+| External cloud provider | `external` |
+| Custom cloud provider | `custom` |
+
+### Interfaces vocabulary
+
+Consul's _interfaces_ vocabulary includes the methods for interacting with Consul agents.
+
+| Interface | Label |
+| :------------------------------------------- | :---- |
+| Command Line Interface (CLI) | `cli` |
+| HTTP Application Programming Interface (API) | `api` |
+| Browser-based user interface (UI) | `ui` |
+
+### Configuration entry vocabulary
+
+Consul's _configuration entry_ vocabulary collects the names of the configuration entries and custom resource definitions (CRDs) that you must define to control service mesh state.
+
+| Name | Label | Configuration entry | CRD |
+| :-------------------------- | :---------------------------- | :-----------------: | :------: |
+| API gateway | `api-gateway` | ✅ | ❌ |
+| Control plane request limit | `control-plane-request-limit` | ✅ | ✅ |
+| Exported services | `exported-services` | ✅ | ✅ |
+| File system certificates | `file-system-certificate` | ✅ | ❌ |
+| HTTP route | `http-route` | ✅ | ❌ |
+| Ingress gateway | `ingress-gateway` | ✅ | ✅ |
+| Inline certificate | `inline-certificate` | ✅ | ❌ |
+| JWT provider | `jwt-provider` | ✅ | ✅ |
+| Mesh | `mesh` | ✅ | ✅ |
+| Proxy defaults | `proxy-defaults` | ✅ | ✅ |
+| Registration | `registration` | ❌ | ✅ |
+| Sameness group | `sameness-group` | ✅ | ✅ |
+| Service defaults | `service-defaults` | ✅ | ✅ |
+| Service intentions | `service-intentions` | ✅ | ✅ |
+| Service resolver | `service-resolver` | ✅ | ✅ |
+| Service router | `service-router` | ✅ | ✅ |
+| Service splitter | `service-splitter` | ✅ | ✅ |
+| TCP route | `tcp-route` | ✅ | ❌ |
+| Terminating gateway | `terminating-gateway` | ✅ | ✅ |
+
+### Envoy extension vocabulary
+
+Consul's _Envoy extension_ vocabulary collects names of supported extensions that run on Envoy proxies.
+
+| Envoy extension | Label |
+| :------------------------------ | :------- |
+| Apigee authorization | `apigee` |
+| External service authorization | `ext` |
+| AWS Lambda functions | `lambda` |
+| Lua scripts | `lua` |
+| OpenTelemetry collector service | `otel` |
+| WebAssembly (WASM) plugins | `wasm` |
+
+## Guide to partials
+
+Partials have file paths that begin by describing the type of content. Then, the filepath mirrors existing structures in the main docs folder. There are two syntaxes used for the partial filepaths:
+
+
+
+```plaintext
+Format / Type / Phase / Feature / Runtime
+Examples / Component / Action / Filetype
+```
+
+
+
+Examples:
+
+- `text/descriptions/agent` contains a reusable description of the Consul agent that includes a link to additional information
+- `text/guidance/discover` contains all links to documentation and tutorials for the "Discover services" phase of usage
+- `tables/compatibility/k8s/lts` and `tables/compatibility/k8s/version` contain version compatibility tables that must be updated with each Consul release
+- `examples/agent/server/encrypted` contains an example of a server agent configuration file in a datacenter with encrypted TLS communication
+- `examples/agent/client/register-service` contains an example of a client agent configuration file that also registers a service to the node
+
+Reasons to use partials:
+
+- You need to repeat the same information, such as steps or requirements, across runtimes or cloud providers
+- You need to reference tables, especially ones that contain version numbers that are updated for each Consul release
+- You need to include a configuration example that can be reused in both reference and usage contexts
+- You need to add an entry to the glossary of terms
+
+## How to document new Consul features
+
+1. Create a file `name.mdx` that serves as an overview combining explanation, usage, and reference information.
+2. When you need more pages, move the file to a folder with `name` and change the filename to `index.mdx`.
+3. Create redirects as required.
+
+For example, "DNS views" was introduced for Kubernetes in Consul v1.20. We created a file, `manage/dns/views.mdx`, then expanded it to `manage/dns/views/index.mdx` and `manage/dns/views/enable` when the content was substantial enough to warrant separate pages. The first file is _always_ reachable at the URL `manage/dns/views`, despite the directory and filename change. The `k8s` label is not used because Kubernetes is the only runtime it supports. Hypothetically, if ECS support for DNS views became available, the directory structure for `content/docs/manage/dns` would become:
+
+```
+.
+├── forwarding.mdx
+└── views
+ ├── enable
+ | ├── ecs.mdx
+ | └── k8s.mdx
+ └── index.mdx
+```
+
+## Maintaining and deprecating content
+
+Documentation is considered "maintained" when the usage instructions work when running the oldest supported LTS release.
+
+When components and features are no longer maintained, they may be "deprecated" by R&D. To deprecate content:
+
+1. Add a deprecation callout to the page. List the date or version when the deprecation occurred.
+1. On deprecation date, delete the content from the repository. Versioned docs preserve the information in older versions. If necessary, keep a single page in the documentation for announcement links and redirects.
+1. Add redirects for deprecated content.
+1. Move partials and images into a "legacy" folder if they are no longer used in the documentation.
+
+If it is possible to migrate existing data from a deprecated component to a replacement, document the migration steps.
diff --git a/website/content/api-docs/acl/auth-methods.mdx b/website/content/api-docs/acl/auth-methods.mdx
index 34f59cb866c5..4657f3b4da6c 100644
--- a/website/content/api-docs/acl/auth-methods.mdx
+++ b/website/content/api-docs/acl/auth-methods.mdx
@@ -15,7 +15,7 @@ ACL auth methods in Consul.
For more information on how to setup ACLs, refer to the following resources:
-- [Access control list (ACL) overview](/consul/docs/security/acl)
+- [Access control list (ACL) overview](/consul/docs/secure/acl)
- [ACL tutorial](/consul/tutorials/security/access-control-setup-production)
## Create an Auth Method
@@ -51,7 +51,7 @@ The corresponding CLI command is [`consul acl auth-method create`](/consul/comma
- `Type` `(string: )` - The type of auth method being configured.
This field is immutable. For allowed values see the [auth method
- documentation](/consul/docs/security/acl/auth-methods).
+ documentation](/consul/docs/secure/acl/auth-method).
- `Description` `(string: "")` - Free form human readable description of the
auth method.
@@ -76,7 +76,7 @@ The corresponding CLI command is [`consul acl auth-method create`](/consul/comma
- `Config` `(map[string]string: )` - The raw configuration to use for
the chosen auth method. Contents will vary depending upon the type chosen.
For more information on configuring specific auth method types, see the [auth
- method documentation](/consul/docs/security/acl/auth-methods).
+ method documentation](/consul/docs/secure/acl/auth-method).
- `Namespace` `(string: "")` - Specifies the namespace of
the auth method you create. This field takes precedence over the `ns` query parameter,
@@ -107,7 +107,7 @@ The corresponding CLI command is [`consul acl auth-method create`](/consul/comma
prefixed-${serviceaccount.name}
```
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample Payload
@@ -180,7 +180,7 @@ The corresponding CLI command is [`consul acl auth-method read`](/consul/command
- `ns` `(string: "")` - Specifies the namespace of the auth method you look up.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -267,7 +267,7 @@ The corresponding CLI command is [`consul acl auth-method update`](/consul/comma
- `Config` `(map[string]string: )` - The raw configuration to use for
the chosen auth method. Contents will vary depending upon the type chosen.
For more information on configuring specific auth method types, see the [auth
- method documentation](/consul/docs/security/acl/auth-methods).
+ method documentation](/consul/docs/secure/acl/auth-method).
- `Namespace` `(string: "")` - Specifies the namespace of
the auth method you update. This field takes precedence over the `ns` query parameter,
@@ -298,7 +298,7 @@ The corresponding CLI command is [`consul acl auth-method update`](/consul/comma
prefixed-${serviceaccount.name}
```
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample Payload
@@ -375,7 +375,7 @@ The corresponding CLI command is [`consul acl auth-method delete`](/consul/comma
- `ns` `(string: "")` - Specifies the namespace of the auth method you delete.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -418,7 +418,7 @@ The corresponding CLI command is [`consul acl auth-method list`](/consul/command
The namespace may be specified as '\*' and then results are returned for all namespaces.
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
## Sample Request
diff --git a/website/content/api-docs/acl/binding-rules.mdx b/website/content/api-docs/acl/binding-rules.mdx
index 34edeadc4a95..a8697b94a82f 100644
--- a/website/content/api-docs/acl/binding-rules.mdx
+++ b/website/content/api-docs/acl/binding-rules.mdx
@@ -15,7 +15,7 @@ rules in Consul.
For more information on how to setup ACLs, refer to the following resources:
-- [Access control list (ACL) overview](/consul/docs/security/acl)
+- [Access control list (ACL) overview](/consul/docs/secure/acl)
- [ACL tutorial](/consul/tutorials/security/access-control-setup-production)
## Create a Binding Rule
@@ -172,7 +172,7 @@ The corresponding CLI command is [`consul acl binding-rule create`](/consul/comm
This field takes precedence over the `ns` query parameter,
one of several [other methods to specify the namespace](#methods-to-specify-namespace).
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample Payload
@@ -240,7 +240,7 @@ The corresponding CLI command is [`consul acl binding-rule read`](/consul/comman
- `ns` `(string: "")` - Specifies the namespace of the binding rule you lookup.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -427,7 +427,7 @@ The corresponding CLI command is [`consul acl binding-rule update`](/consul/comm
This field takes precedence over the `ns` query parameter,
one of several [other methods to specify the namespace](#methods-to-specify-namespace).
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample Payload
@@ -495,7 +495,7 @@ The corresponding CLI command is [`consul acl binding-rule delete`](/consul/comm
- `ns` `(string: "")` - Specifies the namespace of the binding rule you delete.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -539,7 +539,7 @@ The corresponding CLI command is [`consul acl binding-rule list`](/consul/comman
The namespace may be specified as '\*' to return results for all namespaces.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
diff --git a/website/content/api-docs/acl/index.mdx b/website/content/api-docs/acl/index.mdx
index c45368370bf8..bf6a044a21c8 100644
--- a/website/content/api-docs/acl/index.mdx
+++ b/website/content/api-docs/acl/index.mdx
@@ -8,15 +8,15 @@ description: The /acl endpoints manage the Consul's ACL system.
The `/acl` endpoints are used to manage ACL tokens and policies in Consul, [bootstrap the ACL system](#bootstrap-acls) and [check ACL replication status](#check-acl-replication). There are additional pages for managing [tokens](/consul/api-docs/acl/tokens) and [policies](/consul/api-docs/acl/policies) with the `/acl` endpoints.
-For more information on how to setup ACLs, refer to the following resources:
+For more information on how to setup ACLs, refer to the following resources:
-- [Access control list (ACL) overview](/consul/docs/security/acl)
+- [Access control list (ACL) overview](/consul/docs/secure/acl)
- [ACL tutorial](/consul/tutorials/security/access-control-setup-production)
## Bootstrap ACLs
This endpoint does a special one-time bootstrap of the ACL system, making the first
-management token if the [`acl.tokens.initial_management`](/consul/docs/agent/config/config-files#acl_tokens_initial_management)
+management token if the [`acl.tokens.initial_management`](/consul/docs/reference/agent/configuration-file/acl#acl_tokens_initial_management)
configuration entry is not specified in the Consul server configuration and if the
cluster has not been bootstrapped previously. An operator created token can be provided in the body of the request to
bootstrap the cluster if required. The provided token should be presented in a UUID format.
@@ -155,7 +155,7 @@ $ curl \
- `SourceDatacenter` - The authoritative ACL datacenter that ACLs are being
replicated from and will match the
- [`primary_datacenter`](/consul/docs/agent/config/config-files#primary_datacenter) configuration.
+ [`primary_datacenter`](/consul/docs/reference/agent/configuration-file/general#primary_datacenter) configuration.
- `ReplicationType` - The type of replication that is currently in use.
@@ -195,7 +195,7 @@ $ curl \
## Login to Auth Method
This endpoint was added in Consul 1.5.0 and is used to exchange an [auth
-method](/consul/docs/security/acl/auth-methods) bearer token for a newly-created
+method](/consul/docs/secure/acl/auth-method) bearer token for a newly-created
Consul ACL token.
| Method | Path | Produces |
@@ -214,7 +214,7 @@ The table below shows this endpoint's support for
-> **Note** - To use the login process to create tokens in any connected
secondary datacenter, [ACL
-replication](/consul/docs/agent/config/config-files#acl_enable_token_replication) must be
+replication](/consul/docs/reference/agent/configuration-file/acl#acl_enable_token_replication) must be
enabled. Login requires the ability to create local tokens which is restricted
to the primary datacenter and any secondary datacenters with ACL token
replication enabled.
@@ -329,7 +329,7 @@ $ curl \
This endpoint was added in Consul 1.8.0 and is used to obtain an authorization
-URL from Consul to start an [OIDC login flow](/consul/docs/security/acl/auth-methods/oidc).
+URL from Consul to start an [OIDC login flow](/consul/docs/secure/acl/auth-method/oidc).
| Method | Path | Produces |
| ------ | -------------------- | ------------------ |
@@ -347,7 +347,7 @@ The table below shows this endpoint's support for
-> **Note** - To use the login process to create tokens in any connected
secondary datacenter, [ACL
-replication](/consul/docs/agent/config/config-files#acl_enable_token_replication) must be
+replication](/consul/docs/reference/agent/configuration-file/acl#acl_enable_token_replication) must be
enabled. Login requires the ability to create local tokens which is restricted
to the primary datacenter and any secondary datacenters with ACL token
replication enabled.
@@ -360,7 +360,7 @@ replication enabled.
### JSON Request Body Schema
- `AuthMethod` `(string: )` - The name of the auth method to use for
- login. This must be of type [`oidc`](/consul/docs/security/acl/auth-methods/oidc).
+ login. This must be of type [`oidc`](/consul/docs/secure/acl/auth-method/oidc).
- `RedirectURI` `(string: )` - See [Redirect
URIs](/consul/docs/security/acl/auth-methods/oidc#redirect-uris) for more information.
@@ -430,7 +430,7 @@ The table below shows this endpoint's support for
-> **Note** - To use the login process to create tokens in any connected
secondary datacenter, [ACL
-replication](/consul/docs/agent/config/config-files#acl_enable_token_replication) must be
+replication](/consul/docs/reference/agent/configuration-file/acl#acl_enable_token_replication) must be
enabled. Login requires the ability to create local tokens which is restricted
to the primary datacenter and any secondary datacenters with ACL token
replication enabled.
@@ -443,7 +443,7 @@ replication enabled.
### JSON Request Body Schema
- `AuthMethod` `(string: )` - The name of the auth method to use for
- login. This must be of type [`oidc`](/consul/docs/security/acl/auth-methods/oidc).
+ login. This must be of type [`oidc`](/consul/docs/secure/acl/auth-method/oidc).
- `State` `(string: )` - Opaque state ID that is part of the
Authorization URL and will be included in the redirect following
diff --git a/website/content/api-docs/acl/policies.mdx b/website/content/api-docs/acl/policies.mdx
index 1d46fa1b5125..220038fea9a1 100644
--- a/website/content/api-docs/acl/policies.mdx
+++ b/website/content/api-docs/acl/policies.mdx
@@ -47,7 +47,7 @@ The corresponding CLI command is [`consul acl policy create`](/consul/commands/a
- `Description` `(string: "")` - Free form human readable description of the policy.
- `Rules` `(string: "")` - Specifies rules for the ACL policy. The format of the
- `Rules` property is detailed in the [ACL Rules documentation](/consul/docs/security/acl/acl-rules).
+ `Rules` property is detailed in the [ACL Rules documentation](/consul/docs/reference/acl/rule).
- `Datacenters` `(array)` - Specifies the datacenters the policy is valid within.
When no datacenters are provided the policy is valid in all datacenters including
@@ -57,7 +57,7 @@ The corresponding CLI command is [`consul acl policy create`](/consul/commands/a
This field takes precedence over the `ns` query parameter,
one of several [other methods to specify the namespace](#methods-to-specify-namespace).
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample Payload
@@ -172,7 +172,7 @@ The corresponding CLI command is [`consul acl policy read -name=`](/cons
- `ns` `(string: "")` - Specifies the namespace of the policy you lookup.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -236,7 +236,7 @@ The corresponding CLI command is [`consul acl policy update`](/consul/commands/a
- `Description` `(string: "")` - Free form human readable description of this policy.
- `Rules` `(string: "")` - Specifies rules for this ACL policy. The format of the
- `Rules` property is detailed in the [ACL Rules documentation](/consul/docs/security/acl/acl-rules).
+ `Rules` property is detailed in the [ACL Rules documentation](/consul/docs/reference/acl/rule).
- `Datacenters` `(array)` - Specifies the datacenters this policy is valid within.
When no datacenters are provided the policy is valid in all datacenters including
@@ -246,7 +246,7 @@ The corresponding CLI command is [`consul acl policy update`](/consul/commands/a
This field takes precedence over the `ns` query parameter,
one of several [other methods to specify the namespace](#methods-to-specify-namespace).
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample Payload
@@ -313,7 +313,7 @@ The corresponding CLI command is [`consul acl policy delete`](/consul/commands/a
- `ns` `(string: "")` - Specifies the namespace of the policy you delete.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -354,7 +354,7 @@ The corresponding CLI command is [`consul acl policy list`](/consul/commands/acl
The namespace may be specified as '\*' to return results for all namespaces.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
diff --git a/website/content/api-docs/acl/roles.mdx b/website/content/api-docs/acl/roles.mdx
index e54e9a54b07b..607055bd0e11 100644
--- a/website/content/api-docs/acl/roles.mdx
+++ b/website/content/api-docs/acl/roles.mdx
@@ -13,7 +13,7 @@ The `/acl/role` endpoints [create](#create-a-role), [read](#read-a-role),
For more information on how to setup ACLs, refer to the following resources:
-- [Access control list (ACL) overview](/consul/docs/security/acl)
+- [Access control list (ACL) overview](/consul/docs/secure/acl)
- [ACL tutorial](/consul/tutorials/security/access-control-setup-production)
## Create a Role
@@ -95,7 +95,7 @@ The corresponding CLI command is [`consul acl role create`](/consul/commands/acl
This field takes precedence over the `ns` query parameter,
one of several [other methods to specify the namespace](#methods-to-specify-namespace).
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample Payload
@@ -225,7 +225,7 @@ The corresponding CLI command is [`consul acl role read`](/consul/commands/acl/r
- `ns` `(string: "")` - Specifies the namespace of the role you lookup.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -310,7 +310,7 @@ The corresponding CLI command is [`consul acl role read -name=`](/consul
- `ns` `(string: "")` - Specifies the namespace of the role you lookup.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -424,7 +424,7 @@ The corresponding CLI command is [`consul acl role update`](/consul/commands/acl
This field takes precedence over the `ns` query parameter,
one of several [other methods to specify the namespace](#methods-to-specify-namespace).
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample Payload
@@ -531,7 +531,7 @@ The corresponding CLI command is [`consul acl role delete`](/consul/commands/acl
- `ns` `(string: "")` - Specifies the namespace of the role you delete.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -575,7 +575,7 @@ The corresponding CLI command is [`consul acl role list`](/consul/commands/acl/r
The namespace may be specified as '\*' to return results for all namespaces.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
## Sample Request
diff --git a/website/content/api-docs/acl/templated-policies.mdx b/website/content/api-docs/acl/templated-policies.mdx
index de3a6b59aef8..b980b4efc9f9 100644
--- a/website/content/api-docs/acl/templated-policies.mdx
+++ b/website/content/api-docs/acl/templated-policies.mdx
@@ -11,7 +11,7 @@ The `/acl/templated-policy` endpoints [read](#read-a-templated-policy-by-name),
For more information on how to setup ACLs, refer to the following resources:
-- [Access control list (ACL) overview](/consul/docs/security/acl)
+- [Access control list (ACL) overview](/consul/docs/secure/acl)
- [ACL tutorial](/consul/tutorials/security/access-control-setup-production)
## Read a templated policy by name
@@ -87,7 +87,7 @@ The corresponding CLI command is [`consul acl templated-policy preview`](/consul
- `Name` `(string: )` - Specifies the value of the `name` variable in the templated policy variables.
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample payload
diff --git a/website/content/api-docs/acl/tokens.mdx b/website/content/api-docs/acl/tokens.mdx
index af629a3500cf..19bb2eab8ba9 100644
--- a/website/content/api-docs/acl/tokens.mdx
+++ b/website/content/api-docs/acl/tokens.mdx
@@ -11,7 +11,7 @@ The `/acl/token` endpoints [create](#create-a-token), [read](#read-a-token),
For more information on how to setup ACLs, refer to the following resources:
-- [Access control list (ACL) overview](/consul/docs/security/acl)
+- [Access control list (ACL) overview](/consul/docs/secure/acl)
- [ACL tutorial](/consul/tutorials/security/access-control-setup-production)
## Create a Token
@@ -118,7 +118,7 @@ The corresponding CLI command is [`consul acl token create`](/consul/commands/ac
This field takes precedence over the `ns` query parameter,
one of several [other methods to specify the namespace](#methods-to-specify-namespace).
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample Payload
@@ -219,7 +219,7 @@ The corresponding CLI command is [`consul acl token read`](/consul/commands/acl/
- `expanded` `(bool: false)` - If this field is set, the contents of all policies and
roles affecting the token will also be returned.
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -501,7 +501,7 @@ The corresponding CLI command is [`consul acl token update`](/consul/commands/ac
This field takes precedence over the `ns` query parameter,
one of several [other methods to specify the namespace](#methods-to-specify-namespace).
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample Payload
@@ -597,7 +597,7 @@ The corresponding CLI command is [`consul acl token clone`](/consul/commands/acl
This field takes precedence over the `ns` query parameter,
one of several [other methods to specify the namespace](#methods-to-specify-namespace).
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample Payload
@@ -676,7 +676,7 @@ The corresponding CLI command is [`consul acl token delete`](/consul/commands/ac
- `ns` `(string: "")` - Specifies the namespace of the token you delete.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -733,7 +733,7 @@ The corresponding CLI command is [`consul acl token list`](/consul/commands/acl/
The namespace may be specified as '\*' to return results for all namespaces.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
diff --git a/website/content/api-docs/admin-partitions.mdx b/website/content/api-docs/admin-partitions.mdx
index f71fff17256e..7d66a04ca03b 100644
--- a/website/content/api-docs/admin-partitions.mdx
+++ b/website/content/api-docs/admin-partitions.mdx
@@ -4,7 +4,7 @@ page_title: Admin Partition - HTTP API
description: The /partition endpoints allow for managing Consul Enterprise Admin Partitions.
---
-@include 'http_api_and_cli_characteristics_links.mdx'
+@include 'legacy/http_api_and_cli_characteristics_links.mdx'
# Admin Partition - HTTP API
diff --git a/website/content/api-docs/agent/check.mdx b/website/content/api-docs/agent/check.mdx
index 9418d0668463..3d3c9e365983 100644
--- a/website/content/api-docs/agent/check.mdx
+++ b/website/content/api-docs/agent/check.mdx
@@ -6,7 +6,7 @@ description: The /agent/check endpoints interact with checks on the local agent
# Check - Agent HTTP API
-Refer to [Define Health Checks](/consul/docs/services/usage/checks) for information about Consul health check capabilities.
+Refer to [Define Health Checks](/consul/docs/register/health-check/vm) for information about Consul health check capabilities.
The `/agent/check` endpoints interact with health checks
managed by the local agent in Consul.
These should not be confused with checks in the catalog.
@@ -20,10 +20,10 @@ using the HTTP API.
It is important to note that the checks known by the agent may be different from
those reported by the catalog. This is usually due to changes being made while
there is no leader elected. The agent performs active
-[anti-entropy](/consul/docs/architecture/anti-entropy), so in most situations
+[anti-entropy](/consul/docs/concept/consistency), so in most situations
everything will be in sync within a few seconds.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | --------------- | ------------------ |
diff --git a/website/content/api-docs/agent/connect.mdx b/website/content/api-docs/agent/connect.mdx
index edef8125b336..b5fe00d9d9f5 100644
--- a/website/content/api-docs/agent/connect.mdx
+++ b/website/content/api-docs/agent/connect.mdx
@@ -27,8 +27,8 @@ them in the native configuration of the proxy itself (such as RBAC for Envoy).
This endpoint tests whether a connection attempt is authorized between
two services. This is the primary API that must be implemented by
-[proxies](/consul/docs/connect/proxies) or
-[native integrations](/consul/docs/connect/native)
+[proxies](/consul/docs/connect/proxy) or
+[native integrations](/consul/docs/automate/native)
that wish to integrate with the service mesh. Prior to calling this API, it is expected
that the client TLS certificate has been properly verified against the
current CA roots.
@@ -104,8 +104,8 @@ $ curl \
## Certificate Authority (CA) Roots
This endpoint returns the trusted certificate authority (CA) root certificates.
-This is used by [proxies](/consul/docs/connect/proxies) or
-[native integrations](/consul/docs/connect/native) to verify served client
+This is used by [proxies](/consul/docs/connect/proxy) or
+[native integrations](/consul/docs/automate/native) to verify served client
or server certificates are valid.
This is equivalent to the [non-Agent service mesh endpoint](/consul/api-docs/connect),
diff --git a/website/content/api-docs/agent/index.mdx b/website/content/api-docs/agent/index.mdx
index 749ea80f291d..29aaccb3a865 100644
--- a/website/content/api-docs/agent/index.mdx
+++ b/website/content/api-docs/agent/index.mdx
@@ -12,7 +12,7 @@ The `/agent` endpoints are used to interact with the local Consul agent.
Usually, services and checks are registered with an agent which then takes on
the burden of keeping that data synchronized with the cluster. For example, the
agent registers services and checks with the Catalog and performs
-[anti-entropy](/consul/docs/architecture/anti-entropy) to recover from outages.
+[anti-entropy](/consul/docs/concept/consistency) to recover from outages.
In addition to these endpoints, additional endpoints are grouped in the
navigation for `Checks` and `Services`.
@@ -231,7 +231,7 @@ to the nature of gossip, this is eventually consistent: the results may differ
by agent. The strongly consistent view of nodes is instead provided by
`/v1/catalog/nodes`.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | ---------------- | ------------------ |
@@ -261,7 +261,7 @@ The corresponding CLI command is [`consul members`](/consul/commands/members).
network segment). When querying a server, setting this to the special string `_all`
will show members in all segments.
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -453,12 +453,12 @@ $ curl \
## View Metrics
This endpoint will dump the metrics for the most recent finished interval.
-For more information about metrics, see the [telemetry](/consul/docs/agent/telemetry)
+For more information about metrics, see the [telemetry](/consul/docs/monitor/telemetry/agent)
page.
In order to enable [Prometheus](https://prometheus.io/) support, you need to use the
configuration directive
-[`prometheus_retention_time`](/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time).
+[`prometheus_retention_time`](/consul/docs/reference/agent/configuration-file/telemetry#telemetry-prometheus_retention_time).
Since Consul 1.7.2 this endpoint will also automatically switch output format if
the request contains an `Accept` header with a compatible MIME type such as
@@ -765,7 +765,7 @@ $ curl \
This endpoint updates the ACL tokens currently in use by the agent. It can be
used to introduce ACL tokens to the agent for the first time, or to update
tokens that were initially loaded from the agent's configuration. Tokens will be persisted
-only if the [`acl.enable_token_persistence`](/consul/docs/agent/config/config-files#acl_enable_token_persistence)
+only if the [`acl.enable_token_persistence`](/consul/docs/reference/agent/configuration-file/acl#acl_enable_token_persistence)
configuration is `true`. When not being persisted, they will need to be reset if the agent
is restarted.
@@ -778,10 +778,10 @@ is restarted.
| `PUT` | `/agent/token/replication` | `application/json` |
The paths above correspond to the token names as found in the agent configuration:
-[`default`](/consul/docs/agent/config/config-files#acl_tokens_default), [`agent`](/consul/docs/agent/config/config-files#acl_tokens_agent),
-[`agent_recovery`](/consul/docs/agent/config/config-files#acl_tokens_agent_recovery),
-[`config_file_service_registration`](/consul/docs/agent/config/config-files#acl_tokens_config_file_service_registration),
-and [`replication`](/consul/docs/agent/config/config-files#acl_tokens_replication).
+[`default`](/consul/docs/reference/agent/configuration-file/acl#acl_tokens_default), [`agent`](/consul/docs/reference/agent/configuration-file/acl#acl_tokens_agent),
+[`agent_recovery`](/consul/docs/reference/agent/configuration-file/acl#acl_tokens_agent_recovery),
+[`config_file_service_registration`](/consul/docs/reference/agent/configuration-file/acl#acl_tokens_config_file_service_registration),
+and [`replication`](/consul/docs/reference/agent/configuration-file/acl#acl_tokens_replication).
-> **Deprecation Note:** The following paths were deprecated in version 1.11
@@ -790,7 +790,7 @@ and [`replication`](/consul/docs/agent/config/config-files#acl_tokens_replicatio
| `PUT` | `/agent/token/agent_master` | `application/json` |
The paths above correspond to the token names as found in the agent configuration:
-[`agent_master`](/consul/docs/agent/config/config-files#acl_tokens_agent_master).
+[`agent_master`](/consul/docs/reference/agent/configuration-file/acl#acl_tokens_agent_master).
-> **Deprecation Note:** The following paths were deprecated in version 1.4.3
@@ -802,9 +802,9 @@ The paths above correspond to the token names as found in the agent configuratio
| `PUT` | `/agent/token/acl_replication_token` | `application/json` |
The paths above correspond to the token names as found in the agent configuration:
-[`acl_token`](/consul/docs/agent/config/config-files#acl_token_legacy), [`acl_agent_token`](/consul/docs/agent/config/config-files#acl_agent_token_legacy),
-[`acl_agent_master_token`](/consul/docs/agent/config/config-files#acl_agent_master_token_legacy), and
-[`acl_replication_token`](/consul/docs/agent/config/config-files#acl_replication_token_legacy).
+[`acl_token`](/consul/docs/reference/agent/configuration-file/acl#acl_token_legacy), [`acl_agent_token`](/consul/docs/reference/agent/configuration-file/acl#acl_agent_token_legacy),
+[`acl_agent_master_token`](/consul/docs/reference/agent/configuration-file/acl#acl_agent_master_token_legacy), and
+[`acl_replication_token`](/consul/docs/reference/agent/configuration-file/acl#acl_replication_token_legacy).
The table below shows this endpoint's support for
[blocking queries](/consul/api-docs/features/blocking),
diff --git a/website/content/api-docs/agent/service.mdx b/website/content/api-docs/agent/service.mdx
index 301827418270..0212586047db 100644
--- a/website/content/api-docs/agent/service.mdx
+++ b/website/content/api-docs/agent/service.mdx
@@ -20,10 +20,10 @@ or added dynamically using the HTTP API.
It is important to note that the services known by the agent may be different
from those reported by the catalog. This is usually due to changes being made
while there is no leader elected. The agent performs active
-[anti-entropy](/consul/docs/architecture/anti-entropy), so in most situations
+[anti-entropy](/consul/docs/concept/consistency), so in most situations
everything will be in sync within a few seconds.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | ----------------- | ------------------ |
@@ -130,13 +130,13 @@ following selectors and filter operations being supported:
This endpoint was added in Consul 1.3.0 and returns the full service definition
for a single service instance registered on the local agent. It is used by
-[service mesh proxies](/consul/docs/connect/proxies) to discover the embedded proxy
+[service mesh proxies](/consul/docs/connect/proxy) to discover the embedded proxy
configuration that was registered with the instance.
It is important to note that the services known by the agent may be different
from those reported by the catalog. This is usually due to changes being made
while there is no leader elected. The agent performs active
-[anti-entropy](/consul/docs/architecture/anti-entropy), so in most situations
+[anti-entropy](/consul/docs/concept/consistency), so in most situations
everything will be in sync within a few seconds.
| Method | Path | Produces |
@@ -180,7 +180,7 @@ $ curl \
### Sample Response
The response contains the fields specified in the [service
-definition](/consul/docs/services/configuration/services-configuration-reference), but it includes an extra `ContentHash` field that contains the [hash-based blocking
+definition](/consul/docs/reference/service), but it includes an extra `ContentHash` field that contains the [hash-based blocking
query](/consul/api-docs/features/blocking#hash-based-blocking-queries) hash for the result. The
same hash is also present in `X-Consul-ContentHash`.
@@ -604,14 +604,14 @@ The `/agent/service/register` endpoint supports camel case and _snake case_ for
- `Name` `(string: )` - Specifies the logical name of the service.
Many service instances may share the same logical service name. We recommend using
- valid DNS labels for service definition names. Refer to the Internet Engineering Task Force's [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123#page-72) for additional information. Service names that conform to standard usage ensures compatibility with external DNSs. Refer to [Services Configuration Reference](/consul/docs/services/configuration/services-configuration-reference#name) for additional information.
+ valid DNS labels for service definition names. Refer to the Internet Engineering Task Force's [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123#page-72) for additional information. Service names that conform to standard usage ensure compatibility with external DNS. Refer to [Services Configuration Reference](/consul/docs/reference/service#name) for additional information.
- `ID` `(string: "")` - Specifies a unique ID for this service. This must be
unique per _agent_. This defaults to the `Name` parameter if not provided.
- `Tags` `(array: nil)` - Specifies a list of tags to assign to the
service. Tags enable you to filter when querying for the services and are exposed in Consul APIs. We recommend using
- valid DNS labels for tags. Refer to the Internet Engineering Task Force's [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123#page-72) for additional information. Tags that conform to standard usage ensures compatibility with external DNSs. Refer to [Services Configuration Reference](/consul/docs/services/configuration/services-configuration-reference#tags) for additional information.
+ valid DNS labels for tags. Refer to the Internet Engineering Task Force's [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123#page-72) for additional information. Tags that conform to standard usage ensure compatibility with external DNS. Refer to [Services Configuration Reference](/consul/docs/reference/service#tags) for additional information.
- `Address` `(string: "")` - Specifies the address of the service. If not
provided, the agent's address is used as the address for the service during
@@ -634,16 +634,16 @@ The `/agent/service/register` endpoint supports camel case and _snake case_ for
typical Consul service. You can specify the following values:
- `"connect-proxy"` for [service mesh](/consul/docs/connect) proxies representing another service
- `"mesh-gateway"` for instances of a [mesh gateway](/consul/docs/connect/gateways/mesh-gateway#service-mesh-proxy-configuration)
- - `"terminating-gateway"` for instances of a [terminating gateway](/consul/docs/connect/gateways/terminating-gateway)
- - `"ingress-gateway"` for instances of an [ingress gateway](/consul/docs/connect/gateways/ingress-gateway)
+ - `"terminating-gateway"` for instances of a [terminating gateway](/consul/docs/north-south/terminating-gateway)
+ - `"ingress-gateway"` for instances of an [ingress gateway](/consul/docs/north-south/ingress-gateway)
- `Proxy` `(Proxy: nil)` - From 1.2.3 on, specifies the configuration for a
service mesh proxy instance. This is only valid if `Kind` defines a proxy or gateway.
- Refer to the [Service mesh proxy configuration reference](/consul/docs/connect/proxies/proxy-config-reference)
+ Refer to the [Service mesh proxy configuration reference](/consul/docs/reference/proxy/connect-proxy)
for full details.
- `Connect` `(Connect: nil)` - Specifies the
- [configuration for service mesh](/consul/docs/connect/configuration). The connect subsystem provides Consul's service mesh capabilities. Refer to the
+ [configuration for service mesh](/consul/docs/connect). The connect subsystem provides Consul's service mesh capabilities. Refer to the
[Connect Structure](#connect-structure) section below for supported fields.
- `Check` `(Check: nil)` - Specifies a check. Please see the
@@ -681,27 +681,27 @@ The `/agent/service/register` endpoint supports camel case and _snake case_ for
Weights only apply to the locally registered service.
If multiple nodes register the same service, each node implements `EnableTagOverride` and other service configuration items independently. Updating the tags for the service registered
on one node does not necessarily update the same tags on services with the same name registered on another node. If `EnableTagOverride` is not specified the default value is
- `false`. See [anti-entropy syncs](/consul/docs/architecture/anti-entropy) for
+ `false`. See [anti-entropy syncs](/consul/docs/concept/consistency) for
additional information.
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
#### Connect Structure
For the `Connect` field, the parameters are:
- `Native` `(bool: false)` - Specifies whether this service supports
- the [Consul service mesh](/consul/docs/connect) protocol [natively](/consul/docs/connect/native).
+ the [Consul service mesh](/consul/docs/connect) protocol [natively](/consul/docs/automate/native).
If this is true, then service mesh proxies, DNS queries, etc. will be able to
service discover this service.
- `Proxy` `(Proxy: nil)` -
**Deprecated** Specifies that a managed service mesh proxy should be started
for this service instance, and optionally provides configuration for the proxy.
Managed proxies (which have been deprecated since Consul v1.3.0) have been
- [removed](/consul/docs/connect/proxies) since v1.6.0.
+ [removed](/consul/docs/connect/proxy) since v1.6.0.
- `SidecarService` `(ServiceDefinition: nil)` - Specifies an optional nested
service definition to register. Refer to
- [Deploy sidecar services](/consul/docs/connect/proxies/deploy-sidecar-services) for additional information.
+ [Deploy sidecar services](/consul/docs/connect/proxy/sidecar) for additional information.
### Sample Payload
@@ -771,7 +771,7 @@ The corresponding CLI command is [`consul services deregister`](/consul/commands
- `ns` `(string: "")` - Specifies the namespace of the service you deregister.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
diff --git a/website/content/api-docs/api-structure.mdx b/website/content/api-docs/api-structure.mdx
index edfbcf32589e..48b9ec207f41 100644
--- a/website/content/api-docs/api-structure.mdx
+++ b/website/content/api-docs/api-structure.mdx
@@ -44,7 +44,7 @@ $ curl \
this method is highly discouraged because the token can show up in access logs as part of the URL.
The `?token=` query parameter is deprecated and will be removed in a future Consul version.
-To learn more about the ACL system read the [documentation](/consul/docs/security/acl).
+To learn more about the ACL system read the [documentation](/consul/docs/secure/acl).
## Version Prefix
@@ -89,7 +89,7 @@ However, we generally recommend using resource names that don't require URL-enco
Depending on the validation that Consul applies to a resource name,
Consul may still reject a request if it considers the resource name invalid for that endpoint.
And even if Consul considers the resource name valid, it may degrade other functionality,
-such as failed [DNS lookups](/consul/docs/services/discovery/dns-overview)
+such as failed [DNS lookups](/consul/docs/discover/dns)
for nodes or services with names containing invalid DNS characters.
This HTTP API capability also allows the
@@ -103,7 +103,7 @@ The linefeed character (`%0a`) will cause a request to be rejected even if it is
Consul 0.7 added the ability to translate addresses in HTTP response based on
the configuration setting for
-[`translate_wan_addrs`](/consul/docs/agent/config/config-files#translate_wan_addrs). In order
+[`translate_wan_addrs`](/consul/docs/reference/agent/configuration-file/general#translate_wan_addrs). In order
to allow clients to know if address translation is in effect, the
`X-Consul-Translate-Addresses` header will be added if translation is enabled,
and will have a value of `true`. If translation is not enabled then this header
@@ -114,9 +114,9 @@ will not be present.
All API responses for Consul versions after 1.9 will include an HTTP response
header `X-Consul-Default-ACL-Policy` set to either "allow" or "deny" which
mirrors the current value of the agent's
-[`acl.default_policy`](/consul/docs/agent/config/config-files#acl_default_policy) option.
+[`acl.default_policy`](/consul/docs/reference/agent/configuration-file/acl#acl_default_policy) option.
-This is also the default [intention](/consul/docs/connect/intentions) enforcement
+This is also the default [intention](/consul/docs/secure-mesh/intention) enforcement
action if no intention matches.
This is returned even if ACLs are disabled.
diff --git a/website/content/api-docs/catalog.mdx b/website/content/api-docs/catalog.mdx
index f773353483a2..33cb1e21d8d0 100644
--- a/website/content/api-docs/catalog.mdx
+++ b/website/content/api-docs/catalog.mdx
@@ -17,7 +17,7 @@ API methods look similar.
This endpoint is a low-level mechanism for registering or updating
entries in the catalog. It is usually preferable to instead use the
[agent endpoints](/consul/api-docs/agent) for registration as they are simpler and
-perform [anti-entropy](/consul/docs/architecture/anti-entropy).
+perform [anti-entropy](/consul/docs/concept/consistency).
| Method | Path | Produces |
| ------ | ------------------- | ------------------ |
@@ -57,7 +57,7 @@ The table below shows this endpoint's support for
- `Service` `(Service: nil)` - Contains an object the specifies the service to register. The `Service.Service` field is required. If `Service.ID` is not provided, the default is the `Service.Service`.
You can only specify one service with a given `ID` per node. We recommend using
- valid DNS labels for service definition names. Refer to the Internet Engineering Task Force's [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123#page-72) for additional information. Service names that conform to standard usage ensures compatibility with external DNSs. Refer to [Services Configuration Reference](/consul/docs/services/configuration/services-configuration-reference#name) for additional information.
+ valid DNS labels for service definition names. Refer to the Internet Engineering Task Force's [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123#page-72) for additional information. Service names that conform to standard usage ensure compatibility with external DNS. Refer to [Services Configuration Reference](/consul/docs/reference/service#name) for additional information.
The following fields are optional:
- `Tags`
- `Address`
@@ -79,7 +79,7 @@ The table below shows this endpoint's support for
treated as a service level health check, instead of a node level health
check. The `Status` must be one of `passing`, `warning`, or `critical`.
- You can provide defaults for TCP and HTTP health checks to the `Definition` field. Refer to [Health Checks](/consul/docs/services/usage/checks) for additional information.
+ You can provide defaults for TCP and HTTP health checks to the `Definition` field. Refer to [Health Checks](/consul/docs/register/health-check/vm) for additional information.
Multiple checks can be provided by replacing `Check` with `Checks` and
sending an array of `Check` objects.
@@ -169,7 +169,7 @@ $ curl \
This endpoint is a low-level mechanism for directly removing
entries from the Catalog. It is usually preferable to instead use the
[agent endpoints](/consul/api-docs/agent) for deregistration as they are simpler and
-perform [anti-entropy](/consul/docs/architecture/anti-entropy).
+perform [anti-entropy](/consul/docs/concept/consistency).
| Method | Path | Produces |
| ------ | --------------------- | ------------------ |
@@ -301,7 +301,7 @@ $ curl \
This endpoint and returns the nodes registered in a given datacenter.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | ---------------- | ------------------ |
@@ -337,7 +337,7 @@ The corresponding CLI command is [`consul catalog nodes`](/consul/commands/catal
- `filter` `(string: "")` - Specifies the expression used to filter the
queries results prior to returning the data.
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -399,7 +399,7 @@ the following selectors and filter operations being supported:
This endpoint returns the services registered in a given datacenter.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | ------------------- | ------------------ |
@@ -434,7 +434,7 @@ The corresponding CLI command is [`consul catalog services`](/consul/commands/ca
- `filter` `(string: "")` - Specifies the expression used to filter the
queries results prior to returning the data.
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Filtering
@@ -506,7 +506,7 @@ a given service.
This endpoint returns the nodes providing a service in a given datacenter.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | -------------------------------- | ------------------ |
@@ -551,10 +551,12 @@ The table below shows this endpoint's support for
- `filter` `(string: "")` - Specifies the expression used to filter the
queries results prior to returning the data.
+- `peer` `(string: "")` - Specifies the name of the peer that exported the service. Does not apply when no cluster peering connections exist.
+
- `merge-central-config` - Include this flag in a request for `connect-proxy` kind or `*-gateway` kind
services to return a fully resolved service definition that includes merged values from the
- [proxy-defaults/global](/consul/docs/connect/config-entries/proxy-defaults) and
- [service-defaults/:service](/consul/docs/connect/config-entries/service-defaults) config entries.
+ [proxy-defaults/global](/consul/docs/reference/config-entry/proxy-defaults) and
+ [service-defaults/:service](/consul/docs/reference/config-entry/service-defaults) config entries.
Returning a fully resolved service definition is useful when a service was registered using the
[/catalog/register](/consul/api-docs/catalog#register_entity) endpoint, which does not automatically merge config entries.
@@ -664,7 +666,7 @@ $ curl \
[service registration API](/consul/api-docs/agent/service#kind) for more information.
- `ServiceProxy` is the proxy config as specified in
- [service mesh Proxies](/consul/docs/connect/proxies).
+ [service mesh Proxies](/consul/docs/connect/proxy).
- `ServiceConnect` are the [service mesh](/consul/docs/connect) settings. The
value of this struct is equivalent to the `Connect` field for service
@@ -726,7 +728,7 @@ This will include both proxies and native integrations. A service may
register both mesh-capable and incapable services at the same time,
so this endpoint may be used to filter only the mesh-capable endpoints.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | --------------------------- | ------------------ |
@@ -739,7 +741,7 @@ Parameters and response format are the same as
This endpoint returns the node's registered services.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | -------------------------- | ------------------ |
@@ -868,7 +870,7 @@ top level Node object. The following selectors and filter operations are support
This endpoint returns the node's registered services.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | ----------------------------------- | ------------------ |
@@ -898,8 +900,8 @@ The table below shows this endpoint's support for
- `merge-central-config` - Include this flag in a request for `connect-proxy` kind or `*-gateway` kind
services to return a fully resolved service definition that includes merged values from the
- [proxy-defaults/global](/consul/docs/connect/config-entries/proxy-defaults) and
- [service-defaults/:service](/consul/docs/connect/config-entries/service-defaults) config entries.
+ [proxy-defaults/global](/consul/docs/reference/config-entry/proxy-defaults) and
+ [service-defaults/:service](/consul/docs/reference/config-entry/service-defaults) config entries.
Returning a fully resolved service definition is useful when a service was registered using the
[/catalog/register](/consul/api-docs/catalog#register_entity) endpoint, which does not automatically merge config entries.
@@ -1009,7 +1011,7 @@ top level object. The following selectors and filter operations are supported:
This endpoint returns the services associated with an ingress gateway or terminating gateway.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | ------------------------------------ | ------------------ |
diff --git a/website/content/api-docs/config.mdx b/website/content/api-docs/config.mdx
index 49243da09456..05ffe8506e91 100644
--- a/website/content/api-docs/config.mdx
+++ b/website/content/api-docs/config.mdx
@@ -10,9 +10,9 @@ description: |-
The `/config` endpoints create, update, delete and query central configuration
entries registered with Consul. See the
-[agent configuration](/consul/docs/agent/config/config-files#enable_central_service_config)
+[agent configuration](/consul/docs/reference/agent/configuration-file/general#enable_central_service_config)
for more information on how to enable this functionality for centrally
-configuring services and [configuration entries docs](/consul/docs/agent/config-entries) for a description
+configuring services and [configuration entries docs](/consul/docs/fundamentals/config-entry) for a description
of the configuration entries content.
## Apply Configuration
@@ -71,7 +71,7 @@ The ACL required depends on the config entry being written:
- `ns` `(string: "")` - Specifies the namespace of the config entry you apply.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Payload
@@ -152,7 +152,7 @@ The ACL required depends on the config entry kind being read:
- `ns` `(string: "")` - Specifies the namespace of the config entry you lookup
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -178,7 +178,7 @@ $ curl \
This endpoint returns all config entries of the given kind.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | --------------- | ------------------ |
@@ -232,7 +232,7 @@ The corresponding CLI command is [`consul config list`](/consul/commands/config/
- `ns` `(string: "")` - Specifies the namespace of the config entries you lookup.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -327,7 +327,7 @@ The corresponding CLI command is [`consul config delete`](/consul/commands/confi
- `ns` `(string: "")` - Specifies the namespace of the config entry you delete.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
diff --git a/website/content/api-docs/connect/ca.mdx b/website/content/api-docs/connect/ca.mdx
index ad808c13df44..538fd7620640 100644
--- a/website/content/api-docs/connect/ca.mdx
+++ b/website/content/api-docs/connect/ca.mdx
@@ -182,7 +182,7 @@ The corresponding CLI command is [`consul connect ca set-config`](/consul/comman
- `Config` `(map[string]string: )` - The raw configuration to use
for the chosen provider. For more information on configuring the service mesh CA
- providers, see [Provider Config](/consul/docs/connect/ca).
+ providers, see [Provider Config](/consul/docs/secure-mesh/certificate).
- `ForceWithoutCrossSigning` `(bool: false)` - Indicates that the CA change
should be forced to complete even if the current CA doesn't support root cross-signing.
diff --git a/website/content/api-docs/connect/intentions.mdx b/website/content/api-docs/connect/intentions.mdx
index 95a5cfca60a4..db59da3528b3 100644
--- a/website/content/api-docs/connect/intentions.mdx
+++ b/website/content/api-docs/connect/intentions.mdx
@@ -9,11 +9,11 @@ description: |-
# Intentions - Connect HTTP API
The `/connect/intentions` endpoint provide tools for managing
-[intentions](/consul/docs/connect/intentions).
+[intentions](/consul/docs/secure-mesh/intention).
-> **1.9.0 and later:** Reading and writing intentions has been
migrated to the
-[`service-intentions`](/consul/docs/connect/config-entries/service-intentions)
+[`service-intentions`](/consul/docs/reference/config-entry/service-intentions)
config entry kind.
## Upsert Intention by Name ((#upsert-intention-by-name))
@@ -60,7 +60,7 @@ The corresponding CLI command is [`consul intention create -replace`](/consul/co
as shown in the [source and destination naming conventions](/consul/commands/intention#source-and-destination-naming).
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### JSON Request Body Schema
@@ -76,7 +76,7 @@ The corresponding CLI command is [`consul intention create -replace`](/consul/co
the `Permissions` field.
- `Permissions` `(array)` - The list of all [additional L7
- attributes](/consul/docs/connect/config-entries/service-intentions#intentionpermission)
+ attributes](/consul/docs/reference/config-entry/service-intentions#intentionpermission)
that extend the intention match criteria.
Permission precedence is applied top to bottom. For any given request the
@@ -84,7 +84,7 @@ The corresponding CLI command is [`consul intention create -replace`](/consul/co
evaluation. As with L4 intentions, traffic that fails to match any of the
provided permissions in this intention will be subject to the default
intention behavior is defined by the default [ACL
- policy](/consul/docs/agent/config/config-files#acl_default_policy).
+ policy](/consul/docs/reference/agent/configuration-file/acl#acl_default_policy).
This should be omitted for an L4 intention as it is mutually exclusive with
the `Action` field.
@@ -120,7 +120,7 @@ true
-> **Deprecated** - This endpoint is deprecated in Consul 1.9.0 in favor of
[upserting by name](#upsert-intention-by-name) or editing the
-[`service-intentions`](/consul/docs/connect/config-entries/service-intentions) config
+[`service-intentions`](/consul/docs/reference/config-entry/service-intentions) config
entry for the destination.
This endpoint creates a new intention and returns its ID if it was created
@@ -153,7 +153,7 @@ The corresponding CLI command is [`consul intention create`](/consul/commands/in
as shown in the [source and destination naming conventions](/consul/commands/intention#source-and-destination-naming).
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### JSON Request Body Schema
@@ -215,7 +215,7 @@ $ curl \
-> **Deprecated** - This endpoint is deprecated in Consul 1.9.0 in favor of
[upserting by name](#upsert-intention-by-name) or editing the
-[`service-intentions`](/consul/docs/connect/config-entries/service-intentions) config
+[`service-intentions`](/consul/docs/reference/config-entry/service-intentions) config
entry for the destination.
This endpoint updates an intention with the given values.
@@ -294,7 +294,7 @@ The corresponding CLI command is [`consul intention get`](/consul/commands/inten
as shown in the [source and destination naming conventions](/consul/commands/intention#source-and-destination-naming).
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -325,7 +325,7 @@ $ curl \
-> **Deprecated** - This endpoint is deprecated in Consul 1.9.0 in favor of
[reading by name](#read-specific-intention-by-name) or by viewing the
-[`service-intentions`](/consul/docs/connect/config-entries/service-intentions)
+[`service-intentions`](/consul/docs/reference/config-entry/service-intentions)
config entry for the destination.
This endpoint reads a specific intention.
@@ -382,7 +382,7 @@ $ curl \
This endpoint lists all intentions.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | --------------------- | ------------------ |
@@ -410,7 +410,7 @@ The corresponding CLI command is [`consul intention list`](/consul/commands/inte
The `*` wildcard may be used to list intentions from all namespaces.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -493,7 +493,7 @@ The corresponding CLI command is [`consul intention delete`](/consul/commands/in
as shown in the [source and destination naming conventions](/consul/commands/intention#source-and-destination-naming).
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -507,7 +507,7 @@ $ curl \
-> **Deprecated** - This endpoint is deprecated in Consul 1.9.0 in favor of
[deleting by name](#delete-intention-by-name) or editing the
-[`service-intentions`](/consul/docs/connect/config-entries/service-intentions) config
+[`service-intentions`](/consul/docs/reference/config-entry/service-intentions) config
entry for the destination.
This endpoint deletes a specific intention.
@@ -638,7 +638,7 @@ The corresponding CLI command is [`consul intention match`](/consul/commands/int
as shown in the [source and destination naming conventions](/consul/commands/intention#source-and-destination-naming).
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
diff --git a/website/content/api-docs/coordinate.mdx b/website/content/api-docs/coordinate.mdx
index ab00205e6e1f..16df8a201719 100644
--- a/website/content/api-docs/coordinate.mdx
+++ b/website/content/api-docs/coordinate.mdx
@@ -78,7 +78,7 @@ within the same area.
This endpoint returns the LAN network coordinates for all nodes in a given
datacenter.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | ------------------- | ------------------ |
diff --git a/website/content/api-docs/discovery-chain.mdx b/website/content/api-docs/discovery-chain.mdx
index d84bd293c4e4..5ff117b11478 100644
--- a/website/content/api-docs/discovery-chain.mdx
+++ b/website/content/api-docs/discovery-chain.mdx
@@ -9,17 +9,17 @@ description: The /discovery-chain endpoints are for interacting with the discove
-> **1.6.0+:** The discovery chain API is available in Consul versions 1.6.0 and newer.
~> This is a low-level API primarily targeted at developers building external
-[service mesh proxy integrations](/consul/docs/connect/proxies/integrate). Future
+[service mesh proxy integrations](/consul/docs/connect/proxy/custom). Future
high-level proxy integration APIs may obviate the need for this API over time.
The `/discovery-chain` endpoint returns the compiled [discovery
-chain](/consul/docs/connect/manage-traffic/discovery-chain) for a service.
+chain](/consul/docs/manage-traffic/discovery-chain) for a service.
This will fetch all related [configuration
-entries](/consul/docs/agent/config-entries) and render them into a form suitable
-for use by a [service mesh proxy](/consul/docs/connect/proxies) implementation. This
+entries](/consul/docs/fundamentals/config-entry) and render them into a form suitable
+for use by a [service mesh proxy](/consul/docs/connect/proxy) implementation. This
is a key component of [L7 Traffic
-Management](/consul/docs/connect/manage-traffic).
+Management](/consul/docs/manage-traffic).
## Read Compiled Discovery Chain
@@ -66,14 +66,14 @@ The table below shows this endpoint's support for
### JSON Request Body Schema
- `OverrideConnectTimeout` `(duration: 0s)` - Overrides the final [connect
- timeout](/consul/docs/connect/config-entries/service-resolver#connecttimeout) for
+ timeout](/consul/docs/reference/config-entry/service-resolver#connecttimeout) for
any service resolved in the compiled chain.
This value comes from the `connect_timeout_ms` key in the opaque `config` field of the [upstream
configuration](/consul/docs/connect/proxies/proxy-config-reference#upstream-configuration-reference).
- `OverrideProtocol` `(string: "")` - Overrides the final
- [protocol](/consul/docs/connect/config-entries/service-defaults#protocol) used in
+ [protocol](/consul/docs/reference/config-entry/service-defaults#protocol) used in
the compiled discovery chain.
If the chain ordinarily would be TCP and an L7 protocol is passed here the
diff --git a/website/content/api-docs/event.mdx b/website/content/api-docs/event.mdx
index 2e283fc46072..179397ee55f4 100644
--- a/website/content/api-docs/event.mdx
+++ b/website/content/api-docs/event.mdx
@@ -88,10 +88,10 @@ $ curl \
This endpoint returns the most recent events (up to 256) known by the agent. As a
consequence of how the [event command](/consul/commands/event) works, each
agent may have a different view of the events. Events are broadcast using the
-[gossip protocol](/consul/docs/architecture/gossip), so they have no global ordering
+[gossip protocol](/consul/docs/concept/gossip), so they have no global ordering
nor do they make a promise of delivery.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | ------------- | ------------------ |
@@ -146,7 +146,7 @@ $ curl \
The semantics of this endpoint's blocking queries are slightly different. Most
blocking queries provide a monotonic index and block until a newer index is
available. This can be supported as a consequence of the total ordering of the
-[consensus protocol](/consul/docs/architecture/consensus). With gossip, there is no
+[consensus protocol](/consul/docs/concept/consensus). With gossip, there is no
ordering, and instead `X-Consul-Index` maps to the newest event that matches the
query.
diff --git a/website/content/api-docs/exported-services.mdx b/website/content/api-docs/exported-services.mdx
index f7c89d4999a5..6224655264e9 100644
--- a/website/content/api-docs/exported-services.mdx
+++ b/website/content/api-docs/exported-services.mdx
@@ -12,7 +12,7 @@ description: The /exported-services endpoint lists exported services and their c
The `/exported-services` endpoint returns a list of exported services, as well as the admin partitions and cluster peers that consume the services.
-This list consists of the services that were exported using an [`exported-services` configuration entry](/consul/docs/connect/config-entries/exported-services). Sameness groups and wildcards in the configuration entry are expanded in the response.
+This list consists of the services that were exported using an [`exported-services` configuration entry](/consul/docs/reference/config-entry/exported-services). Sameness groups and wildcards in the configuration entry are expanded in the response.
## List Exported Services
@@ -36,7 +36,7 @@ The table below shows this endpoint's support for
### Query Parameters
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
diff --git a/website/content/api-docs/features/consistency.mdx b/website/content/api-docs/features/consistency.mdx
index 746b062ab4c6..e0dd185e8a30 100644
--- a/website/content/api-docs/features/consistency.mdx
+++ b/website/content/api-docs/features/consistency.mdx
@@ -22,7 +22,7 @@ to fine-tune these trade-offs for their own use case at two levels:
Consul servers are responsible for maintaining state information like the registration and
health status of services and nodes. To protect this state against the potential failure of
Consul servers, this state is replicated across three or more Consul servers in production using a
-[consensus protocol](/consul/docs/architecture/consensus).
+[consensus protocol](/consul/docs/concept/consensus).
One Consul server is elected leader and acts as the ultimate authority on Consul's state.
If a majority of Consul servers agree to a state change, the leader is responsible for recording
@@ -74,8 +74,8 @@ Each HTTP API endpoint documents its support for the three read consistency mode
~> **Scaling read requests**: The most effective way to increase read scalability
is to convert non-`stale` reads to `stale` reads. If most requests are already
`stale` reads and additional load reduction is desired, use Consul Enterprise
-[redundancy zones](/consul/docs/enterprise/redundancy) or
-[read replicas](/consul/docs/enterprise/read-scale)
+[redundancy zones](/consul/docs/manage/scale/redundancy-zone) or
+[read replicas](/consul/docs/manage/scale/read-replica)
to spread `stale` reads across additional, _non-voting_ Consul servers.
Non-voting servers enhance read scalability without increasing the number
of voting servers; adding more than 5 voting servers is not recommended because
@@ -111,7 +111,7 @@ When making a request across federated Consul datacenters, requests are forwarde
a local server to any remote server. Once in the remote datacenter, the request path
is the same as a [local request with the same consistency mode](#intra-datacenter-request-behavior).
The following diagrams show the cross-datacenter request paths when Consul servers in datacenters are
-[federated either directly or via mesh gateways](/consul/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways).
+[federated either directly or via mesh gateways](/consul/docs/east-west/mesh-gateway/enable).
@@ -131,7 +131,7 @@ The following diagrams show the cross-datacenter request paths when Consul serve
### Consul DNS Queries
-When DNS queries are issued to [Consul's DNS interface](/consul/docs/services/discovery/dns-overview),
+When DNS queries are issued to [Consul's DNS interface](/consul/docs/discover/dns),
Consul uses the `stale` consistency mode by default when interfacing with its
underlying Consul service discovery HTTP APIs
([Catalog](/consul/api-docs/catalog), [Health](/consul/api-docs/health), and [Prepared Query](/consul/api-docs/query)).
@@ -272,7 +272,7 @@ Note that some HTTP API endpoints support a `cached` parameter which has some of
semantics as `stale` consistency mode but different trade offs. This behavior is described in
[agent caching feature documentation](/consul/api-docs/features/caching)
-
-[`dns_config.allow_stale`]: /consul/docs/agent/config/config-files#allow_stale
-[`dns_config.max_stale`]: /consul/docs/agent/config/config-files#max_stale
-[`discovery_max_stale`]: /consul/docs/agent/config/config-files#discovery_max_stale
+
+[`dns_config.allow_stale`]: /consul/docs/reference/agent/configuration-file/dns#allow_stale
+[`dns_config.max_stale`]: /consul/docs/reference/agent/configuration-file/dns#max_stale
+[`discovery_max_stale`]: /consul/docs/reference/agent/configuration-file/general#discovery_max_stale
diff --git a/website/content/api-docs/health.mdx b/website/content/api-docs/health.mdx
index c6182f441b5d..adc4ec93e32d 100644
--- a/website/content/api-docs/health.mdx
+++ b/website/content/api-docs/health.mdx
@@ -21,7 +21,7 @@ use the [`/agent/check`](/consul/api-docs/agent/check) endpoints.
This endpoint returns the checks specific to the node provided on the path.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | -------------------- | ------------------ |
@@ -115,7 +115,7 @@ the following selectors and filter operations being supported:
This endpoint returns the checks associated with the service provided on the
path.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | ------------------------- | ------------------ |
@@ -205,7 +205,7 @@ This endpoint returns the service instances providing the service indicated on t
Users can also build in support for dynamic load balancing and other features by
incorporating the use of health checks.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | -------------------------- | ------------------ |
@@ -222,7 +222,7 @@ The table below shows this endpoint's support for
| `YES` 1 | `all` | `background refresh` | `node:read,service:read` |
- 1some query parameters will use the streaming backend for blocking queries.
+ 1some query parameters will use the streaming backend for blocking queries.
### Path Parameters
@@ -238,7 +238,7 @@ The table below shows this endpoint's support for
ascending order based on the estimated round trip time from that node. Passing
`?near=_agent` uses the agent's node for the sort.
~> **Note:** Using `near` will ignore
- [`use_streaming_backend`](/consul/docs/agent/config/config-files#use_streaming_backend) and always
+ [`use_streaming_backend`](/consul/docs/reference/agent/configuration-file/general#use_streaming_backend) and always
use blocking queries, because the data required to sort the results is not available
to the streaming backend.
@@ -265,16 +265,16 @@ The table below shows this endpoint's support for
- `merge-central-config` - Include this flag in a request for `connect-proxy` kind or `*-gateway` kind
services to return a fully resolved service definition that includes merged values from the
- [proxy-defaults/global](/consul/docs/connect/config-entries/proxy-defaults) and
- [service-defaults/:service](/consul/docs/connect/config-entries/service-defaults) config entries.
- Returning a fully resolved service definition is useful when a service was registered using the
+ [proxy-defaults/global](/consul/docs/reference/config-entry/proxy-defaults) and
+ [service-defaults/:service](/consul/docs/reference/config-entry/service-defaults) config entries.
+ Returning a fully resolved service definition is useful when a service was registered using the
[/catalog/register](/consul/api-docs/catalog#register_entity) endpoint, which does not automatically merge config entries.
- `ns` `(string: "")` - Specifies the namespace of the service.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
- `sg` `(string: "")` - Specifies the sameness group the service is a member of to
- facilitate requests to identical services in other peers or partitions.
+ facilitate requests to identical services in other peers or partitions.
### Sample Request
@@ -420,7 +420,7 @@ This will include both proxies and native integrations. A service may
register both mesh-capable and incapable services at the same time,
so this endpoint may be used to filter only the mesh-capable endpoints.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | -------------------------- | ------------------ |
@@ -434,9 +434,9 @@ Parameters and response format are the same as
-> **1.8.0+:** This API is available in Consul versions 1.8.0 and later.
This endpoint returns the service instances providing an [ingress
-gateway](/consul/docs/connect/gateways/ingress-gateway) for a service in a given datacenter.
+gateway](/consul/docs/north-south/ingress-gateway) for a service in a given datacenter.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | -------------------------- | ------------------ |
@@ -452,7 +452,7 @@ endpoint does not support the `peer` query parameter and the [streaming backend]
This endpoint returns the checks in the state provided on the path.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | ---------------------- | ------------------ |
diff --git a/website/content/api-docs/index.mdx b/website/content/api-docs/index.mdx
index 20d0021e30cd..9ce832702d12 100644
--- a/website/content/api-docs/index.mdx
+++ b/website/content/api-docs/index.mdx
@@ -37,7 +37,7 @@ The following API endpoints give you control over access to services in your net
Use the following API endpoints enable network observability.
- [`/status`](/consul/api-docs/status): Debug your Consul datacenter by returning low-level Raft information about Consul server peers.
-- [`/agent/metrics`](/consul/api-docs/agent#view-metrics): Retrieve metrics for the most recent intervals that have finished. For more information about metrics, refer to [Telemetry](/consul/docs/agent/telemetry).
+- [`/agent/metrics`](/consul/api-docs/agent#view-metrics): Retrieve metrics for the most recent intervals that have finished. For more information about metrics, refer to [Telemetry](/consul/docs/monitor/telemetry/agent).
## Manage Consul
@@ -56,4 +56,4 @@ The following API endpoints enable you to dynamically configure your services.
- [`/event`](/consul/api-docs/event): Start a custom event that you can use to build scripts and automations.
- [`/kv`](/consul/api-docs/kv): Add, remove, and update metadata stored in the Consul KV store.
-- [`/session`](/consul/api-docs/session): Create and manage [sessions](/consul/docs/dynamic-app-config/sessions) in Consul. You can use sessions to build distributed and granular locks to ensure nodes are properly writing to the Consul KV store.
+- [`/session`](/consul/api-docs/session): Create and manage [sessions](/consul/docs/automate/session) in Consul. You can use sessions to build distributed and granular locks to ensure nodes are properly writing to the Consul KV store.
diff --git a/website/content/api-docs/kv.mdx b/website/content/api-docs/kv.mdx
index 3eef6eb27f71..8ed1d16572a5 100644
--- a/website/content/api-docs/kv.mdx
+++ b/website/content/api-docs/kv.mdx
@@ -79,7 +79,7 @@ The corresponding CLI command is [`consul kv get`](/consul/commands/kv/get).
For recursive lookups, the namespace may be specified as '\*'
to return results for all namespaces.
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -212,7 +212,7 @@ The corresponding CLI command is [`consul kv put`](/consul/commands/kv/put).
session has locked the key.**
For an example of how to use the lock feature, check the
- [Leader Election tutorial](/consul/docs/dynamic-app-config/sessions/application-leader-election).
+ [Leader Election tutorial](/consul/docs/automate/application-leader-election).
- `release` `(string: "")` - Supply a session ID to use in a release operation. This is
useful when paired with `?acquire=` as it allows clients to yield a lock. This
@@ -222,7 +222,7 @@ The corresponding CLI command is [`consul kv put`](/consul/commands/kv/put).
- `ns` `(string: "")` - Specifies the namespace to query.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Payload
@@ -293,7 +293,7 @@ The corresponding CLI command is [`consul kv delete`](/consul/commands/kv/delete
- `ns` `(string: "")` - Specifies the namespace to query.
You can also [specify the namespace through other methods](#methods-to-specify-namespace).
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
diff --git a/website/content/api-docs/namespaces.mdx b/website/content/api-docs/namespaces.mdx
index 8c963d80a9f0..b6371a80298b 100644
--- a/website/content/api-docs/namespaces.mdx
+++ b/website/content/api-docs/namespaces.mdx
@@ -67,7 +67,7 @@ The corresponding CLI command is [`consul namespace create`](/consul/commands/na
- `Meta` `(map: )` - Specifies arbitrary KV metadata
to associate with the namespace.
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample Payload
@@ -173,7 +173,7 @@ The corresponding CLI command is [`consul namespace read`](/consul/commands/name
### Query Parameters
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -278,7 +278,7 @@ The corresponding CLI command is [`consul namespace update`](/consul/commands/na
- `Meta` `(map: )` - Specifies arbitrary KV metadata
to associate with the namespace.
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
### Sample Payload
@@ -385,7 +385,7 @@ The corresponding CLI command is [`consul namespace delete`](/consul/commands/na
### Query Parameters
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -437,7 +437,7 @@ $ curl --request DELETE \
This endpoint lists all the Namespaces. The output will be filtered based on the
privileges of the ACL token used for the request.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | ------------- | ------------------ |
@@ -460,7 +460,7 @@ The corresponding CLI command is [`consul namespace list`](/consul/commands/name
### Query Parameters
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
diff --git a/website/content/api-docs/operator/autopilot.mdx b/website/content/api-docs/operator/autopilot.mdx
index e59898862a26..e71cd09eac62 100644
--- a/website/content/api-docs/operator/autopilot.mdx
+++ b/website/content/api-docs/operator/autopilot.mdx
@@ -68,7 +68,7 @@ $ curl \
```
For more information about the Autopilot configuration options, see the
-[agent configuration section](/consul/docs/agent/config/config-files#autopilot).
+[agent configuration section](/consul/docs/reference/agent/configuration-file/general#autopilot).
## Update Configuration
@@ -327,7 +327,7 @@ $ curl \
- `OptimisticFailuretolerance` is the maximum number
of servers that could fail in the right order over the right period of time
without causing an outage. This value is only useful when using the [Redundancy
- Zones feature](/consul/docs/enterprise/redundancy) with autopilot.
+ Zones feature](/consul/docs/manage/scale/redundancy-zone) with autopilot.
- `Servers` is a mapping of server ID to an object holding detailed information about that server.
The format of the detailed info is documented in its own section.
diff --git a/website/content/api-docs/operator/keyring.mdx b/website/content/api-docs/operator/keyring.mdx
index ec0b14fc8db7..582caf9e265b 100644
--- a/website/content/api-docs/operator/keyring.mdx
+++ b/website/content/api-docs/operator/keyring.mdx
@@ -9,7 +9,7 @@ description: |-
# Keyring Operator HTTP API
The `/operator/keyring` endpoints allow for management of the gossip encryption
-keyring. Please see the [Gossip Protocol Guide](/consul/docs/architecture/gossip) for
+keyring. Please see the [Gossip Protocol Guide](/consul/docs/concept/gossip) for
more details on the gossip protocol and its use.
## List Gossip Encryption Keys
diff --git a/website/content/api-docs/operator/raft.mdx b/website/content/api-docs/operator/raft.mdx
index 7b55284644c5..c5f8f9696b85 100644
--- a/website/content/api-docs/operator/raft.mdx
+++ b/website/content/api-docs/operator/raft.mdx
@@ -11,7 +11,7 @@ description: |-
The `/operator/raft` endpoints provide tools for management of the Raft
consensus subsystem and cluster quorum.
-Please see the [Consensus Protocol Guide](/consul/docs/architecture/consensus) for
+Please see the [Consensus Protocol Guide](/consul/docs/concept/consensus) for
more information about Raft consensus protocol and its use.
## Read Configuration
diff --git a/website/content/api-docs/operator/segment.mdx b/website/content/api-docs/operator/segment.mdx
index e65268e2ad91..78214e95480d 100644
--- a/website/content/api-docs/operator/segment.mdx
+++ b/website/content/api-docs/operator/segment.mdx
@@ -18,7 +18,7 @@ The network area functionality described here is available only in
later. Network segments are operator-defined sections of agents on the LAN, typically
isolated from other segments by network configuration.
-Please check the [Network Segments documentation](/consul/docs/enterprise/network-segments/network-segments-overview) for more details.
+Please check the [Network Segments documentation](/consul/docs/multi-tenant/network-segment) for more details.
## List Network Segments
diff --git a/website/content/api-docs/peering.mdx b/website/content/api-docs/peering.mdx
index f0e4d7767710..1dca013430ac 100644
--- a/website/content/api-docs/peering.mdx
+++ b/website/content/api-docs/peering.mdx
@@ -34,7 +34,7 @@ The table below shows this endpoint's support for
and configuration entries such as `service-intentions`. This field must be a
valid DNS hostname label.
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
- `ServerExternalAddresses` `([]string: )` - The addresses for the cluster that generates the peering token. Addresses take the form `{host or IP}:port`.
You can specify one or more load balancers or external IPs that route external traffic to this cluster's Consul servers.
@@ -99,7 +99,7 @@ The table below shows this endpoint's support for
and configuration entries such as `service-intentions`. This field must be a
valid DNS hostname label.
-@include 'http-api-body-options-partition.mdx'
+@include 'legacy/http-api-body-options-partition.mdx'
- `PeeringToken` `(string: )` - The peering token fetched from the
peer cluster.
@@ -159,7 +159,7 @@ The table below shows this endpoint's support for
### Query Parameters
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -231,7 +231,7 @@ The table below shows this endpoint's support for
### Query Parameters
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
@@ -263,7 +263,7 @@ $ curl --request DELETE \
This endpoint lists all the peerings.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | ------------- | ------------------ |
@@ -281,7 +281,7 @@ The table below shows this endpoint's support for
### Query Parameters
-@include 'http-api-query-parms-partition.mdx'
+@include 'legacy/http-api-query-parms-partition.mdx'
### Sample Request
diff --git a/website/content/api-docs/query.mdx b/website/content/api-docs/query.mdx
index f8c71bfb065a..a42c16ff6c2a 100644
--- a/website/content/api-docs/query.mdx
+++ b/website/content/api-docs/query.mdx
@@ -11,7 +11,7 @@ The `/query` endpoints create, update, destroy, and execute prepared queries.
Prepared queries allow you to register a complex service query and then execute
it later by specifying the query ID or name. Consul returns a set of healthy nodes that provide a given
service. Refer to
-[Enable Dynamic DNS Queries](/consul/docs/services/discovery/dns-dynamic-lookups) for additional information.
+[Enable Dynamic DNS Queries](/consul/docs/discover/service/dynamic) for additional information.
Check the [Geo Failover tutorial](/consul/tutorials/developer-discovery/automate-geo-failover) for details and
examples for using prepared queries to implement geo failover for services.
@@ -215,7 +215,7 @@ The table below shows this endpoint's support for
service instances in the local datacenter.
This option cannot be used with `NearestN` or `Datacenters`.
- - `Peer` `(string: "")` - Specifies a [cluster peer](/consul/docs/connect/cluster-peering) to use for
+ - `Peer` `(string: "")` - Specifies a [cluster peer](/consul/docs/east-west/cluster-peering) to use for
failover.
- `Datacenter` `(string: "")` - Specifies a WAN federated datacenter to forward the
@@ -325,7 +325,7 @@ $ curl \
This endpoint returns a list of all prepared queries.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| ------ | -------- | ------------------ |
diff --git a/website/content/api-docs/session.mdx b/website/content/api-docs/session.mdx
index 3f13f178d711..21d5b2faa8da 100644
--- a/website/content/api-docs/session.mdx
+++ b/website/content/api-docs/session.mdx
@@ -77,7 +77,7 @@ The table below shows this endpoint's support for
86400s). If provided, the session is invalidated if it is not renewed before
the TTL expires. The lowest practical TTL should be used to keep the number of
managed sessions low. When locks are forcibly expired, such as when following
- the [leader election pattern](/consul/docs/dynamic-app-config/sessions/application-leader-election) in an application,
+ the [leader election pattern](/consul/docs/automate/application-leader-election) in an application,
sessions may not be reaped for up to double this TTL, so long TTL
values (> 1 hour) should be avoided. Valid time units include "s", "m" and "h".
@@ -230,7 +230,7 @@ If the session does not exist, an empty JSON list `[]` is returned.
This endpoint returns the active sessions for a given node.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| :----- | :-------------------- | ------------------ |
@@ -293,7 +293,7 @@ $ curl \
This endpoint returns the list of active sessions.
-@include 'http_api_results_filtered_by_acls.mdx'
+@include 'legacy/http_api_results_filtered_by_acls.mdx'
| Method | Path | Produces |
| :----- | :-------------- | ------------------ |
diff --git a/website/content/api-docs/snapshot.mdx b/website/content/api-docs/snapshot.mdx
index 69460e9e3f70..a4cb8a244d1f 100644
--- a/website/content/api-docs/snapshot.mdx
+++ b/website/content/api-docs/snapshot.mdx
@@ -10,7 +10,7 @@ description: |-
The `/snapshot` endpoints save and restore the state of the Consul
servers for disaster recovery. Snapshots include all state managed by Consul's
-Raft [consensus protocol](/consul/docs/architecture/consensus).
+Raft [consensus protocol](/consul/docs/concept/consensus).
## Generate Snapshot
diff --git a/website/content/commands/acl/auth-method/create.mdx b/website/content/commands/acl/auth-method/create.mdx
index 4f1015cfb61d..61a02008df1b 100644
--- a/website/content/commands/acl/auth-method/create.mdx
+++ b/website/content/commands/acl/auth-method/create.mdx
@@ -67,9 +67,9 @@ Usage: `consul acl auth-method create [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
- `-namespace-rule-bind-namespace=` - Namespace to bind on match. Can
use `${var}` interpolation. Added in Consul 1.8.0.
@@ -80,9 +80,9 @@ Usage: `consul acl auth-method create [options] [args]`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/auth-method/delete.mdx b/website/content/commands/acl/auth-method/delete.mdx
index acb095c5872b..3a54675a07c4 100644
--- a/website/content/commands/acl/auth-method/delete.mdx
+++ b/website/content/commands/acl/auth-method/delete.mdx
@@ -31,15 +31,15 @@ Usage: `consul acl auth-method delete [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/auth-method/list.mdx b/website/content/commands/acl/auth-method/list.mdx
index 42d7285ca76d..8e9a2d2b0be2 100644
--- a/website/content/commands/acl/auth-method/list.mdx
+++ b/website/content/commands/acl/auth-method/list.mdx
@@ -34,15 +34,15 @@ Usage: `consul acl auth-method list`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/auth-method/read.mdx b/website/content/commands/acl/auth-method/read.mdx
index 015852f9684c..877d240a9f98 100644
--- a/website/content/commands/acl/auth-method/read.mdx
+++ b/website/content/commands/acl/auth-method/read.mdx
@@ -36,15 +36,15 @@ Usage: `consul acl auth-method read [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/auth-method/update.mdx b/website/content/commands/acl/auth-method/update.mdx
index d328ea999c2b..35889b811ebe 100644
--- a/website/content/commands/acl/auth-method/update.mdx
+++ b/website/content/commands/acl/auth-method/update.mdx
@@ -72,9 +72,9 @@ Usage: `consul acl auth-method update [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
- `-namespace-rule-bind-namespace=` - Namespace to bind on match. Can
use `${var}` interpolation. Added in Consul 1.8.0.
@@ -85,9 +85,9 @@ Usage: `consul acl auth-method update [options] [args]`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/binding-rule/create.mdx b/website/content/commands/acl/binding-rule/create.mdx
index b32b2ce934e5..e94bbb545ccb 100644
--- a/website/content/commands/acl/binding-rule/create.mdx
+++ b/website/content/commands/acl/binding-rule/create.mdx
@@ -47,15 +47,15 @@ Usage: `consul acl binding-rule create [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/binding-rule/delete.mdx b/website/content/commands/acl/binding-rule/delete.mdx
index 9164f62a7c0e..cf82d3cf6099 100644
--- a/website/content/commands/acl/binding-rule/delete.mdx
+++ b/website/content/commands/acl/binding-rule/delete.mdx
@@ -32,15 +32,15 @@ Usage: `consul acl binding-rule delete [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/binding-rule/list.mdx b/website/content/commands/acl/binding-rule/list.mdx
index cbb9c184c821..8e5052640a36 100644
--- a/website/content/commands/acl/binding-rule/list.mdx
+++ b/website/content/commands/acl/binding-rule/list.mdx
@@ -34,15 +34,15 @@ Usage: `consul acl binding-rule list`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/binding-rule/read.mdx b/website/content/commands/acl/binding-rule/read.mdx
index 50f1e8112f88..5809a721c9e3 100644
--- a/website/content/commands/acl/binding-rule/read.mdx
+++ b/website/content/commands/acl/binding-rule/read.mdx
@@ -37,15 +37,15 @@ Usage: `consul acl binding-rule read [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/binding-rule/update.mdx b/website/content/commands/acl/binding-rule/update.mdx
index 22ad67e0cfc9..06dc4cf5fa00 100644
--- a/website/content/commands/acl/binding-rule/update.mdx
+++ b/website/content/commands/acl/binding-rule/update.mdx
@@ -54,15 +54,15 @@ Usage: `consul acl binding-rule update [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/bootstrap.mdx b/website/content/commands/acl/bootstrap.mdx
index e5b43a3ec6b4..626609be86fb 100644
--- a/website/content/commands/acl/bootstrap.mdx
+++ b/website/content/commands/acl/bootstrap.mdx
@@ -49,6 +49,6 @@ Policies:
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
diff --git a/website/content/commands/acl/policy/create.mdx b/website/content/commands/acl/policy/create.mdx
index e178536515e0..f952e811d201 100644
--- a/website/content/commands/acl/policy/create.mdx
+++ b/website/content/commands/acl/policy/create.mdx
@@ -49,15 +49,15 @@ Usage: `consul acl policy create [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/policy/delete.mdx b/website/content/commands/acl/policy/delete.mdx
index 2b611f5a78dc..bc6b2b15f706 100644
--- a/website/content/commands/acl/policy/delete.mdx
+++ b/website/content/commands/acl/policy/delete.mdx
@@ -34,15 +34,15 @@ Usage: `consul acl policy delete [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/policy/list.mdx b/website/content/commands/acl/policy/list.mdx
index ad3b114b9793..3d19ad8240c4 100644
--- a/website/content/commands/acl/policy/list.mdx
+++ b/website/content/commands/acl/policy/list.mdx
@@ -34,15 +34,15 @@ Usage: `consul acl policy list`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/policy/read.mdx b/website/content/commands/acl/policy/read.mdx
index 28e2f51a79d0..1e07142a8abb 100644
--- a/website/content/commands/acl/policy/read.mdx
+++ b/website/content/commands/acl/policy/read.mdx
@@ -39,15 +39,15 @@ Usage: `consul acl policy read [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/policy/update.mdx b/website/content/commands/acl/policy/update.mdx
index edc529a5da9f..dab0f2a51d57 100644
--- a/website/content/commands/acl/policy/update.mdx
+++ b/website/content/commands/acl/policy/update.mdx
@@ -58,15 +58,15 @@ Usage: `consul acl policy update [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/role/create.mdx b/website/content/commands/acl/role/create.mdx
index 82989a2697a1..c1d83df84f9e 100644
--- a/website/content/commands/acl/role/create.mdx
+++ b/website/content/commands/acl/role/create.mdx
@@ -52,15 +52,15 @@ Usage: `consul acl role create [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/role/delete.mdx b/website/content/commands/acl/role/delete.mdx
index 9a39e97ab843..61b721edbb29 100644
--- a/website/content/commands/acl/role/delete.mdx
+++ b/website/content/commands/acl/role/delete.mdx
@@ -34,15 +34,15 @@ Usage: `consul acl role delete [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/role/list.mdx b/website/content/commands/acl/role/list.mdx
index afd8c75a41a9..fe75b99cfcc4 100644
--- a/website/content/commands/acl/role/list.mdx
+++ b/website/content/commands/acl/role/list.mdx
@@ -34,15 +34,15 @@ Usage: `consul acl role list`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/role/read.mdx b/website/content/commands/acl/role/read.mdx
index aa5008f57d5c..3f04c96ce2ba 100644
--- a/website/content/commands/acl/role/read.mdx
+++ b/website/content/commands/acl/role/read.mdx
@@ -39,15 +39,15 @@ Usage: `consul acl role read [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/role/update.mdx b/website/content/commands/acl/role/update.mdx
index 1308e445ba7f..6b27cd5778cf 100644
--- a/website/content/commands/acl/role/update.mdx
+++ b/website/content/commands/acl/role/update.mdx
@@ -63,15 +63,15 @@ Usage: `consul acl role update [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/set-agent-token.mdx b/website/content/commands/acl/set-agent-token.mdx
index c56b706871b2..f77b3317405c 100644
--- a/website/content/commands/acl/set-agent-token.mdx
+++ b/website/content/commands/acl/set-agent-token.mdx
@@ -14,7 +14,7 @@ Corresponding HTTP API Endpoint: [\[PUT\] /v1/agent/token/:type](/consul/api-doc
This command updates the ACL tokens currently in use by the agent. It can be used to introduce
ACL tokens to the agent for the first time, or to update tokens that were initially loaded from
the agent's configuration. Tokens are not persisted unless
-[`acl.enable_token_persistence`](/consul/docs/agent/config/config-files#acl_enable_token_persistence)
+[`acl.enable_token_persistence`](/consul/docs/reference/agent/configuration-file/acl#acl_enable_token_persistence)
is `true`, so tokens will need to be updated again if that option is `false` and
the agent is restarted.
@@ -38,7 +38,7 @@ The token types are:
- `dns` - Specifies the token that agents use to request information needed to respond to DNS queries.
If the `dns` token is not set, Consul uses the `default` token by default.
Because the `default` token allows unauthenticated HTTP API access to list nodes and services, we
- strongly recommend using the `dns` token. Create DNS tokens using the [templated policy](/consul/docs/security/acl/tokens/create/create-a-dns-token#create_a_dns_token) option
+ strongly recommend using the `dns` token. Create DNS tokens using the [templated policy](/consul/docs/secure/acl/token/dns) option
to ensure that the token has the permissions needed to respond to all DNS queries.
- `config_file_service_registration` - This is the token that the agent uses to
@@ -61,9 +61,9 @@ The token types are:
### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/templated-policy/list.mdx b/website/content/commands/acl/templated-policy/list.mdx
index bc77f4c3f526..008b291f8ed5 100644
--- a/website/content/commands/acl/templated-policy/list.mdx
+++ b/website/content/commands/acl/templated-policy/list.mdx
@@ -27,9 +27,9 @@ Usage: `consul acl templated-policy list`
### API options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Example
diff --git a/website/content/commands/acl/templated-policy/preview.mdx b/website/content/commands/acl/templated-policy/preview.mdx
index cfffa6db7e55..08608b8b1b7d 100644
--- a/website/content/commands/acl/templated-policy/preview.mdx
+++ b/website/content/commands/acl/templated-policy/preview.mdx
@@ -36,15 +36,15 @@ Usage: `consul acl templated-policy preview [options] [args]`
### Enterprise options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
### API options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/templated-policy/read.mdx b/website/content/commands/acl/templated-policy/read.mdx
index 94e461e4ea94..cedebe8718d4 100644
--- a/website/content/commands/acl/templated-policy/read.mdx
+++ b/website/content/commands/acl/templated-policy/read.mdx
@@ -28,9 +28,9 @@ Usage: `consul acl templated-policy read [options] [args]`
### API options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/token/clone.mdx b/website/content/commands/acl/token/clone.mdx
index 937097fb284d..45420e0ee80a 100644
--- a/website/content/commands/acl/token/clone.mdx
+++ b/website/content/commands/acl/token/clone.mdx
@@ -38,15 +38,15 @@ Usage: `consul acl token clone [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/token/create.mdx b/website/content/commands/acl/token/create.mdx
index 8cd5fb6b948b..92907226dd87 100644
--- a/website/content/commands/acl/token/create.mdx
+++ b/website/content/commands/acl/token/create.mdx
@@ -66,15 +66,15 @@ Usage: `consul acl token create [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/token/delete.mdx b/website/content/commands/acl/token/delete.mdx
index 13a3ca26e6e3..6d727ddcf112 100644
--- a/website/content/commands/acl/token/delete.mdx
+++ b/website/content/commands/acl/token/delete.mdx
@@ -32,15 +32,15 @@ Usage: `consul acl token delete [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/token/list.mdx b/website/content/commands/acl/token/list.mdx
index fd3b6c215114..de9c05a740e0 100644
--- a/website/content/commands/acl/token/list.mdx
+++ b/website/content/commands/acl/token/list.mdx
@@ -34,15 +34,15 @@ Usage: `consul acl token list`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/token/read.mdx b/website/content/commands/acl/token/read.mdx
index dfcbea12d8e1..934e6718673f 100644
--- a/website/content/commands/acl/token/read.mdx
+++ b/website/content/commands/acl/token/read.mdx
@@ -43,15 +43,15 @@ Usage: `consul acl token read [options] [args]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/acl/token/update.mdx b/website/content/commands/acl/token/update.mdx
index 1f89d828641c..5a72b36d5368 100644
--- a/website/content/commands/acl/token/update.mdx
+++ b/website/content/commands/acl/token/update.mdx
@@ -93,15 +93,15 @@ instead.
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/agent.mdx b/website/content/commands/agent.mdx
index 6e2a7524119e..2029327b5240 100644
--- a/website/content/commands/agent.mdx
+++ b/website/content/commands/agent.mdx
@@ -9,11 +9,565 @@ description: >-
# Consul Agent
-The `consul agent` command is the heart of Consul: it runs the agent that
-performs the important task of maintaining membership information,
-running checks, announcing services, handling queries, etc.
-
-Due to the power and flexibility of this command, the Consul agent
-is documented in its own section. See the [Consul Agent](/consul/docs/agent)
-section for more information on how to use this command and the
-options it has.
+This page describes the available command-line options for the Consul agent.
+
+## Usage
+
+```shell-session
+consul agent
+```
+
+## Environment Variables
+
+Environment variables **cannot** be used to configure the Consul client. They
+_can_ be used when running other `consul` CLI commands that connect with a
+running agent, e.g. `CONSUL_HTTP_ADDR=192.168.0.1:8500 consul members`.
+
+Refer to [Consul Commands](/consul/commands#environment-variables) for more
+information.
+
+## Options
+
+- `-auto-reload-config` ((#\_auto_reload_config)) - This option directs Consul to automatically reload the [reloadable configuration options](/consul/docs/agent/config#reloadable-configuration) when configuration files change.
+ Consul also watches the certificate and key files specified with the `cert_file` and `key_file` parameters and reloads the configuration if the files are updated.
+
+- `-check_output_max_size` - Override the default
+ limit of 4k for the maximum size of check output; this must be a positive value. Limiting this
+ size reduces pressure on Consul servers when many checks produce
+ very large output. To completely disable check output
+ capture, use [`discard_check_output`](/consul/docs/reference/agent/configuration-file/general#discard_check_output).
+
+- `-client` ((#\_client)) - The address to which Consul will bind client
+ interfaces, including the HTTP and DNS servers. By default, this is "127.0.0.1",
+ allowing only loopback connections. In Consul 1.0 and later this can be set to
+ a space-separated list of addresses to bind to, or a [go-sockaddr]
+ template that can potentially resolve to multiple addresses.
+
+
+
+ ```shell
+ $ consul agent -dev -client '{{ GetPrivateInterfaces | exclude "type" "ipv6" | join "address" " " }}'
+ ```
+
+
+
+
+
+ ```shell
+ $ consul agent -dev -client '{{ GetPrivateInterfaces | join "address" " " }} {{ GetAllInterfaces | include "flags" "loopback" | join "address" " " }}'
+ ```
+
+
+
+
+
+ ```shell
+ $ consul agent -dev -client '{{ GetPrivateInterfaces | exclude "name" "br.*" | join "address" " " }}'
+ ```
+
+
+
+- `-data-dir` ((#\_data_dir)) - This flag provides a data directory for
+ the agent to store state. This is required for all agents. The directory should
+ be durable across reboots. This is especially critical for agents that are running
+ in server mode as they must be able to persist cluster state. Additionally, the
+ directory must support the use of filesystem locking, meaning some types of mounted
+ folders (e.g. VirtualBox shared folders) may not be suitable.
+
+ **Note:** both server and non-server agents may store ACL tokens in the state in this directory so read access may grant access to any tokens on servers and to any tokens used during service registration on non-servers. On Unix-based platforms the files are written with 0600 permissions so you should ensure only trusted processes can execute as the same user as Consul. On Windows, you should ensure the directory has suitable permissions configured as these will be inherited.
+
+- `-datacenter` ((#\_datacenter)) - This flag controls the datacenter in
+ which the agent is running. If not provided, it defaults to "dc1". Consul has first-class
+ support for multiple datacenters, but it relies on proper configuration. Nodes
+ in the same datacenter should be on a single LAN.
+
+~> **Warning:** This `datacenter` string must conform to [RFC 1035 DNS label requirements](https://datatracker.ietf.org/doc/html/rfc1035#section-2.3.1),
+ consisting solely of letters, digits, and hyphens, with a maximum
+ length of 63 characters, and no hyphens at the beginning or end of the label.
+ Non-compliant names create Consul DNS entries incompatible with PKI X.509 certificate generation.
+
+- `-dev` ((#\_dev)) - Enable development server mode. This is useful for
+ quickly starting a Consul agent with all persistence options turned off, enabling
+ an in-memory server which can be used for rapid prototyping or developing against
+ the API. In this mode, [service mesh is enabled](/consul/docs/fundamentals/config-entry) and
+ will by default create a new root CA certificate on startup. This mode is **not**
+ intended for production use as it does not write any data to disk. The gRPC port
+ is also defaulted to `8502` in this mode.
+
+- `-disable-keyring-file` ((#\_disable_keyring_file)) - If set, the keyring
+ will not be persisted to a file. Any installed keys will be lost on shutdown, and
+ only the given `-encrypt` key will be available on startup. This defaults to false.
+
+- `-enable-script-checks` ((#\_enable_script_checks)) This controls whether
+ [health checks that execute scripts](/consul/docs/register/health-check/vm) are enabled on this
+ agent, and defaults to `false` so operators must opt-in to allowing these. This
+ was added in Consul 0.9.0.
+
+ ~> **Security Warning:** Enabling script checks in some configurations may
+ introduce a remote execution vulnerability which is known to be targeted by
+ malware. We strongly recommend `-enable-local-script-checks` instead. See [this
+ blog post](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations)
+ for more details.
+
+- `-enable-local-script-checks` ((#\_enable_local_script_checks))
+ Like [`-enable-script-checks`](#_enable_script_checks), but only enable them when
+ they are defined in the local configuration files. Script checks defined in HTTP
+ API registrations will still not be allowed.
+
+- `-encrypt` ((#\_encrypt)) - Specifies the secret key to use for encryption
+ of Consul network traffic. This key must be 32-bytes that are Base64-encoded. The
+ easiest way to create an encryption key is to use [`consul keygen`](/consul/commands/keygen).
+ All nodes within a cluster must share the same encryption key to communicate. The
+ provided key is automatically persisted to the data directory and loaded automatically
+ whenever the agent is restarted. This means that to encrypt Consul's gossip protocol,
+ this option only needs to be provided once on each agent's initial startup sequence.
+ If it is provided after Consul has been initialized with an encryption key, then
+ the provided key is ignored and a warning will be displayed.
+
+- `-grpc-port` ((#\_grpc_port)) - the gRPC API port to listen on. Default
+ -1 (gRPC disabled). See [ports](/consul/docs/agent/config#ports-used) documentation for more detail.
+
+- `-hcl` ((#\_hcl)) - An HCL configuration fragment. This HCL configuration
+ fragment is appended to the configuration and allows you to specify the full range
+ of options of a config file on the command line. This option can be specified multiple
+ times. This was added in Consul 1.0.
+
+- `-http-port` ((#\_http_port)) - the HTTP API port to listen on. This overrides
+ the default port 8500. This option is very useful when deploying Consul to an environment
+ which communicates the HTTP port through the environment e.g. PaaS like CloudFoundry,
+ allowing you to set the port directly via a Procfile.
+
+- `-https-port` ((#\_https_port)) - the HTTPS API port to listen on. Default
+ -1 (https disabled). See [ports](/consul/docs/agent/config#ports-used) documentation for more detail.
+
+- `-default-query-time` ((#\_default_query_time)) - This flag controls the
+ amount of time a blocking query will wait before Consul will force a response.
+ This value can be overridden by the `wait` query parameter. Note that Consul applies
+ some jitter on top of this time. Defaults to 300s.
+
+- `-max-query-time` ((#\_max_query_time)) - this flag controls the maximum
+ amount of time a blocking query can wait before Consul will force a response. Consul
+ applies jitter to the wait time. The jittered time will be capped to this time.
+ Defaults to 600s.
+
+- `-pid-file` ((#\_pid_file)) - This flag provides the file path for the
+ agent to store its PID. This is useful for sending signals (for example, `SIGINT`
+ to close the agent or `SIGHUP` to update check definitions) to the agent.
+
+- `-protocol` ((#\_protocol)) - The Consul protocol version to use. Consul
+ agents speak protocol 2 by default, however agents will automatically use protocol > 2 when speaking to compatible agents. This should be set only when [upgrading](/consul/docs/upgrade). You can view the protocol versions supported by Consul by running `consul version`.
+
+- `-raft-protocol` ((#\_raft_protocol)) - This controls the internal version
+ of the Raft consensus protocol used for server communications. This must be set
+ to 3 in order to gain access to Autopilot features, with the exception of [`cleanup_dead_servers`](/consul/docs/reference/agent/configuration-file/general#cleanup_dead_servers). Defaults to 3 in Consul 1.0.0 and later (defaulted to 2 previously). See [Raft Protocol Version Compatibility](/consul/docs/upgrade/version-specific#raft-protocol-version-compatibility) for more details.
+
+- `-segment` ((#\_segment)) - This flag is used to set
+ the name of the network segment the agent belongs to. An agent can only join and
+ communicate with other agents within its network segment. Ensure the [join
+ operation uses the correct port for this segment](/consul/docs/multi-tenant/network-segment/vm#configure-clients-to-join-segments).
+ Review the [Network Segments documentation](/consul/docs/multi-tenant/network-segment/vm)
+ for more details. By default, this is an empty string, which is the `<default>`
+ network segment.
+
+ ~> **Warning:** The `segment` flag cannot be used with the [`partition`](/consul/docs/reference/agent/configuration-file/general#partition) option.
+
+## Advertise Address Options
+
+- `-advertise` ((#\_advertise)) - The advertise address is used to change
+ the address that we advertise to other nodes in the cluster. By default, the [`-bind`](#_bind)
+ address is advertised. However, in some cases, there may be a routable address
+ that cannot be bound. This flag enables gossiping a different address to support
+ this. If this address is not routable, the node will be in a constant flapping
+ state as other nodes will treat the non-routability as a failure. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr]
+ template that is resolved at runtime.
+
+
+
+ ```shell-session
+ $ consul agent -advertise '{{ GetInterfaceIP "eth0" }}'
+ ```
+
+
+
+- `-advertise-wan` ((#\_advertise-wan)) - The advertise WAN address is used
+ to change the address that we advertise to server nodes joining through the WAN.
+ This can also be set on client agents when used in combination with the [`translate_wan_addrs`](/consul/docs/reference/agent/configuration-file/general#translate_wan_addrs) configuration option. By default, the [`-advertise`](#_advertise) address
+ is advertised. However, in some cases all members of all datacenters cannot be
+ on the same physical or virtual network, especially on hybrid setups mixing cloud
+ and private datacenters. This flag enables server nodes gossiping through the public
+ network for the WAN while using private VLANs for gossiping to each other and their
+ client agents, and it allows client agents to be reached at this address when being
+ accessed from a remote datacenter if the remote datacenter is configured with [`translate_wan_addrs`](/consul/docs/reference/agent/configuration-file/general#translate_wan_addrs). In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr]
+ template that is resolved at runtime.
+
+## Address Bind Options
+
+- `-bind` ((#\_bind)) - The address that should be bound to for internal
+ cluster communications. This is an IP address that should be reachable by all other
+ nodes in the cluster. By default, this is "0.0.0.0", meaning Consul will bind to
+ all addresses on the local machine and will [advertise](#_advertise)
+ the private IPv4 address to the rest of the cluster. If there are multiple private
+ IPv4 addresses available, Consul will exit with an error at startup. If you specify
+ `"[::]"`, Consul will [advertise](#_advertise) the public
+ IPv6 address. If there are multiple public IPv6 addresses available, Consul will
+ exit with an error at startup. Consul uses both TCP and UDP and the same port for
+ both. If you have any firewalls, be sure to allow both protocols. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr]
+ template that must resolve at runtime to a single address. Some example templates:
+
+
+
+ ```shell-session
+ $ consul agent -bind '{{ GetPrivateInterfaces | include "network" "10.0.0.0/8" | attr "address" }}'
+ ```
+
+
+
+
+
+ ```shell-session
+ $ consul agent -bind '{{ GetInterfaceIP "eth0" }}'
+ ```
+
+
+
+
+
+ ```shell-session
+ $ consul agent -bind '{{ GetAllInterfaces | include "name" "^eth" | include "flags" "forwardable|up" | attr "address" }}'
+ ```
+
+
+
+- `-serf-wan-bind` ((#\_serf_wan_bind)) - The address that should be bound
+ to for Serf WAN gossip communications. By default, the value follows the same rules
+ as [`-bind` command-line flag](#_bind), and if this is not specified, the `-bind`
+ option is used. This is available in Consul 0.7.1 and later. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr]
+ template that is resolved at runtime.
+
+- `-serf-lan-bind` ((#\_serf_lan_bind)) - The address that should be bound
+ to for Serf LAN gossip communications. This is an IP address that should be reachable
+ by all other LAN nodes in the cluster. By default, the value follows the same rules
+ as [`-bind` command-line flag](#_bind), and if this is not specified, the `-bind`
+ option is used. This is available in Consul 0.7.1 and later. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr]
+ template that is resolved at runtime.
+
+## Bootstrap Options
+
+- `-bootstrap` ((#\_bootstrap)) - This flag is used to control if a server
+ is in "bootstrap" mode. It is important that no more than one server **per** datacenter
+ be running in this mode. Technically, a server in bootstrap mode is allowed to
+ self-elect as the Raft leader. It is important that only a single node is in this
+ mode; otherwise, consistency cannot be guaranteed as multiple nodes are able to
+ self-elect. It is not recommended to use this flag after a cluster has been bootstrapped.
+
+- `-bootstrap-expect` ((#\_bootstrap_expect)) - This flag provides the number
+ of expected servers in the datacenter. Either this value should not be provided
+ or the value must agree with other servers in the cluster. When provided, Consul
+ waits until the specified number of servers are available and then bootstraps the
+ cluster. This allows an initial leader to be elected automatically. This cannot
+ be used in conjunction with the legacy [`-bootstrap`](#_bootstrap) flag. This flag
+ requires [`-server`](#_server) mode.
+
+## Configuration File Options
+
+- `-config-file` ((#\_config_file)) - A configuration file to load. For
+ more information on the format of this file, read the [Configuration Files](/consul/docs/reference/agent)
+ section. This option can be specified multiple times to load multiple configuration
+ files. If it is specified multiple times, configuration files loaded later will
+ merge with configuration files loaded earlier. During a config merge, single-value
+ keys (string, int, bool) will simply have their values replaced while list types
+ will be appended together.
+
+- `-config-dir` ((#\_config_dir)) - A directory of configuration files to
+ load. Consul will load all files in this directory with the suffix ".json" or ".hcl".
+ The load order is alphabetical, and the same merge routine is used as with
+ the [`config-file`](#_config_file) option above. This option can be specified multiple
+ times to load multiple directories. Sub-directories of the config directory are
+ not loaded. For more information on the format of the configuration files, refer to
+ the [Configuration Files](/consul/docs/reference/agent) section.
+
+- `-config-format` ((#\_config_format)) - The format of the configuration
+ files to load. Normally, Consul detects the format of the config files from the
+ ".json" or ".hcl" extension. Setting this option to either "json" or "hcl" forces
+ Consul to interpret any file, with or without an extension, in that
+ format.
+
+## DNS and Domain Options
+
+- `-dns-port` ((#\_dns_port)) - the DNS port to listen on. This overrides
+ the default port 8600. This is available in Consul 0.7 and later.
+
+- `-domain` ((#\_domain)) - By default, Consul responds to DNS queries in
+ the "consul." domain. This flag can be used to change that domain. All queries
+ in this domain are assumed to be handled by Consul and will not be recursively
+ resolved.
+
+- `-alt-domain` ((#\_alt_domain)) - This flag allows Consul to respond to
+ DNS queries in an alternate domain, in addition to the primary domain. If unset,
+ no alternate domain is used.
+
+ In Consul 1.10.4 and later, Consul DNS responses will use the same domain as in the query (`-domain` or `-alt-domain`) where applicable.
+ PTR query responses will always use `-domain`, since the desired domain cannot be included in the query.
+
+- `-recursor` ((#\_recursor)) - Specifies the address of an upstream DNS
+ server. This option may be provided multiple times, and is functionally equivalent
+ to the [`recursors` configuration option](/consul/docs/reference/agent/configuration-file/general#recursors).
+
+- `-join` ((#\_join)) - **Deprecated in Consul 1.15. This flag will be removed in a future version of Consul. Use the `-retry-join` flag instead.**
+ This is an alias of [`-retry-join`](#_retry_join).
+
+- `-retry-join` ((#\_retry_join)) - Address of another agent to join upon starting up. Joining is
+ retried until success. Once the agent joins successfully as a member, it will not attempt to join
+ again. After joining, the agent solely maintains its membership via gossip. This option can be
+ specified multiple times to specify multiple agents to join. By default, the agent won't join any
+ nodes when it starts up. The value can contain IPv4, IPv6, or DNS addresses. Literal IPv6
+ addresses must be enclosed in square brackets. If multiple values are given, they are tried and
+ retried in the order listed until the first succeeds.
+
+ This supports [Cloud Auto-Joining](#cloud-auto-joining).
+
+ This can be dynamically defined with a [go-sockaddr] template that is resolved at runtime.
+
+ If Consul is running on a non-default Serf LAN port, you must specify the port number in the address when using the `-retry-join` flag. Alternatively, you can specify the custom port number as the default in the agent's [`ports.serf_lan`](/consul/docs/reference/agent/configuration-file/general#serf_lan_port) configuration or with the [`-serf-lan-port`](#_serf_lan_port) command line flag when starting the agent.
+
+ If your network contains network segments, refer to the [network segments documentation](/consul/docs/multi-tenant/network-segment/vm) for additional information.
+
+ Here are some examples of using `-retry-join`:
+
+
+
+ ```shell-session
+ $ consul agent -retry-join "consul.domain.internal"
+ ```
+
+
+
+
+
+ ```shell-session
+ $ consul agent -retry-join "10.0.4.67"
+ ```
+
+
+
+
+
+ ```shell-session
+ $ consul agent -retry-join "192.0.2.10:8304"
+ ```
+
+
+
+
+
+ ```shell-session
+ $ consul agent -retry-join "[::1]:8301"
+ ```
+
+
+
+
+
+ ```shell-session
+ $ consul agent -retry-join "consul.domain.internal" -retry-join "10.0.4.67"
+ ```
+
+
+
+ ### Cloud Auto-Joining
+
+ The `-retry-join` option accepts a unified interface using the
+ [go-discover](https://github.com/hashicorp/go-discover) library for doing
+ automatic cluster joining using cloud metadata. For more information, see
+ the [Cloud Auto-join page](/consul/docs/deploy/server/cloud-auto-join).
+
+
+
+ ```shell-session
+ $ consul agent -retry-join "provider=aws tag_key=..."
+ ```
+
+
+
+- `-retry-interval` ((#\_retry_interval)) - Time to wait between join attempts.
+ Defaults to 30s.
+
+- `-retry-max` ((#\_retry_max)) - The maximum number of join attempts if using
+ [`-retry-join`](#_retry_join) before exiting with return code 1. By default, this is set
+ to 0 which is interpreted as infinite retries.
+
+- `-join-wan` ((#\_join_wan)) - **Deprecated in Consul 1.15. This flag will be removed in a future version of Consul. Use the `-retry-join-wan` flag instead.**
+ This is an alias of [`-retry-join-wan`](#_retry_join_wan)
+
+- `-retry-join-wan` ((#\_retry_join_wan)) - Address of another WAN agent to join upon starting up.
+ WAN joining is retried until success. This can be specified multiple times to specify multiple WAN
+ agents to join. If multiple values are given, they are tried and retried in the order listed
+ until the first succeeds. By default, the agent won't WAN join any nodes when it starts up.
+
+ This supports [Cloud Auto-Joining](#cloud-auto-joining).
+
+ This can be dynamically defined with a [go-sockaddr] template that is resolved at runtime.
+
+- `-primary-gateway` ((#\_primary_gateway)) - Similar to [`-retry-join-wan`](#_retry_join_wan)
+ but allows retrying discovery of fallback addresses for the mesh gateways in the
+ primary datacenter if the first attempt fails. This is useful for cases where we
+ know the address will become available eventually. [Cloud Auto-Joining](#cloud-auto-joining)
+ is supported as well as [go-sockaddr]
+ templates. This was added in Consul 1.8.0.
+
+- `-retry-interval-wan` ((#\_retry_interval_wan)) - Time to wait between
+ [`-retry-join-wan`](#_retry_join_wan) attempts. Defaults to 30s.
+
+- `-retry-max-wan` ((#\_retry_max_wan)) - The maximum number of [`-retry-join-wan`](#_retry_join_wan)
+ attempts to be made before exiting with return code 1. By default, this is set
+ to 0 which is interpreted as infinite retries.
+
+- `-rejoin` ((#\_rejoin)) - When provided, Consul will ignore a previous
+ leave and attempt to rejoin the cluster when starting. By default, Consul treats
+ leave as a permanent intent and does not attempt to join the cluster again when
+ starting. This flag allows the previous state to be used to rejoin the cluster.
+
+## Log Options
+
+- `-log-file` ((#\_log_file)) - writes all the Consul agent log messages
+ to a file at the path indicated by this flag. The filename defaults to `consul.log`.
+ When the log file rotates, this value is used as a prefix for the path to the log and the current timestamp is
+ appended to the file name. If the value ends in a path separator, `consul-`
+ will be appended to the value. If the file name is missing an extension, `.log`
+ is appended. For example, setting `log-file` to `/var/log/` would result in a log
+ file path of `/var/log/consul.log`. `log-file` can be combined with
+ [`-log-rotate-bytes`](#_log_rotate_bytes) and [`-log-rotate-duration`](#_log_rotate_duration)
+ for a fine-grained log rotation experience. After rotation, the path and filename take the following form:
+ `/var/log/consul-{timestamp}.log`
+
+- `-log-rotate-bytes` ((#\_log_rotate_bytes)) - to specify the number of
+ bytes that should be written to a log before it needs to be rotated. Unless specified,
+ there is no limit to the number of bytes that can be written to a log file.
+
+- `-log-rotate-duration` ((#\_log_rotate_duration)) - to specify the maximum
+ duration a log should be written to before it needs to be rotated. Must be a duration
+ value such as 30s. Defaults to 24h.
+
+- `-log-rotate-max-files` ((#\_log_rotate_max_files)) - to specify the maximum
+ number of older log file archives to keep. Defaults to 0 (no files are ever deleted).
+ Set to -1 to discard old log files when a new one is created.
+
+- `-log-level` ((#\_log_level)) - The level of logging to show after the
+ Consul agent has started. This defaults to "info". The available log levels are
+ "trace", "debug", "info", "warn", and "error". You can always connect to an agent
+ via [`consul monitor`](/consul/commands/monitor) and use any log level. Also,
+ the log level can be changed during a config reload.
+
+- `-log-json` ((#\_log_json)) - This flag enables the agent to output logs
+ in a JSON format. By default this is false.
+
+- `-syslog` ((#\_syslog)) - This flag enables logging to syslog. This is
+ only supported on Linux and macOS. It will result in an error if provided on Windows.
+
+## Node Options
+
+- `-node` ((#\_node)) - The name of this node in the cluster. This must
+ be unique within the cluster. By default this is the hostname of the machine.
+ The node name cannot contain whitespace or quotation marks. To query the node from DNS, the name must only contain alphanumeric characters and hyphens (`-`).
+
+- `-node-id` ((#\_node_id)) - Available in Consul 0.7.3 and later, this
+ is a unique identifier for this node across all time, even if the name of the node
+ or address changes. This must be in the form of a hex string, 36 characters long,
+ such as `adf4238a-882b-9ddc-4a9d-5b6758e4159e`. If this isn't supplied, which is
+ the most common case, then the agent will generate an identifier at startup and
+ persist it in the [data directory](#_data_dir) so that it will remain the same
+ across agent restarts. Information from the host will be used to generate a deterministic
+ node ID if possible, unless [`-disable-host-node-id`](#_disable_host_node_id) is
+ set to true.
+
+- `-node-meta` ((#\_node_meta)) - Available in Consul 0.7.3 and later, this
+ specifies an arbitrary metadata key/value pair to associate with the node, of the
+ form `key:value`. This can be specified multiple times. Node metadata pairs have
+ the following restrictions:
+
+ - A maximum of 64 key/value pairs can be registered per node.
+ - Metadata keys must be between 1 and 128 characters (inclusive) in length
+ - Metadata keys must contain only alphanumeric, `-`, and `_` characters.
+ - Metadata keys must not begin with the `consul-` prefix; that is reserved for internal use by Consul.
+ - Metadata values must be between 0 and 512 (inclusive) characters in length.
+ - Metadata values for keys beginning with `rfc1035-` are encoded verbatim in DNS TXT requests, otherwise
+ the metadata kv-pair is encoded according to [RFC1464](https://www.ietf.org/rfc/rfc1464.txt).
+
+- `-disable-host-node-id` ((#\_disable_host_node_id)) - Setting this to
+ true will prevent Consul from using information from the host to generate a deterministic
+ node ID, and will instead generate a random node ID which will be persisted in
+ the data directory. This is useful when running multiple Consul agents on the same
+ host for testing. This defaults to false in Consul prior to version 0.8.5 and in
+ 0.8.5 and later defaults to true, so you must opt-in for host-based IDs. Host-based
+ IDs are generated using [gopsutil](https://github.com/shirou/gopsutil/), which
+ is shared with HashiCorp's [Nomad](https://www.nomadproject.io/), so if you opt-in
+ to host-based IDs then Consul and Nomad will use information on the host to automatically
+ assign the same ID in both systems.
+
+## Serf Options
+
+- `-serf-lan-allowed-cidrs` ((#\_serf_lan_allowed_cidrs)) - Restricts incoming
+ Serf LAN connections to the specified networks (multiple values are supported).
+ Networks are specified in CIDR notation (for example, 192.168.1.0/24).
+ This is available in Consul 1.8 and later.
+
+- `-serf-lan-port` ((#\_serf_lan_port)) - the Serf LAN port to listen on.
+ This overrides the default Serf LAN port 8301. This is available in Consul 1.2.2
+ and later.
+
+- `-serf-wan-allowed-cidrs` ((#\_serf_wan_allowed_cidrs)) - Restricts incoming
+ Serf WAN connections to the specified networks (multiple values are supported).
+ Networks are specified in CIDR notation (for example, 192.168.1.0/24).
+ This is available in Consul 1.8 and later.
+
+- `-serf-wan-port` ((#\_serf_wan_port)) - the Serf WAN port to listen on.
+ This overrides the default Serf WAN port 8302. This is available in Consul 1.2.2
+ and later.
+
+## Server Options
+
+- `-server` ((#\_server)) - This flag is used to control if an agent is
+ in server or client mode. When provided, an agent will act as a Consul server.
+ Each Consul cluster must have at least one server and ideally no more than 5 per
+ datacenter. All servers participate in the Raft consensus algorithm to ensure that
+ transactions occur in a consistent, linearizable manner. Transactions modify cluster
+ state, which is maintained on all server nodes to ensure availability in the case
+ of node failure. Server nodes also participate in a WAN gossip pool with server
+ nodes in other datacenters. Servers act as gateways to other datacenters and forward
+ RPC traffic as appropriate.
+
+- `-server-port` ((#\_server_port)) - the server RPC port to listen on.
+ This overrides the default server RPC port 8300. This is available in Consul 1.2.2
+ and later.
+
+- `-non-voting-server` ((#\_non_voting_server)) - **This field
+ is deprecated in Consul 1.9.1. See the [`-read-replica`](#_read_replica) flag instead.**
+
+- `-read-replica` ((#\_read_replica)) - This
+ flag is used to make the server not participate in the Raft quorum, and have it
+ only receive the data replication stream. This can be used to add read scalability
+ to a cluster in cases where a high volume of reads to servers are needed.
+
+## UI Options
+
+- `-ui` ((#\_ui)) - Enables the built-in web UI server and the required
+ HTTP routes. This eliminates the need to maintain the Consul web UI files separately
+ from the binary.
+
+- `-ui-dir` ((#\_ui_dir)) - This flag provides the directory containing
+ the Web UI resources for Consul. This will automatically enable the Web UI. The
+ directory must be readable to the agent. Starting with Consul version 0.7.0 and
+ later, the Web UI assets are included in the binary so this flag is no longer necessary;
+ specifying only the `-ui` flag is enough to enable the Web UI. Specifying both
+ the `-ui` and `-ui-dir` flags will result in an error.
+
+
+- `-ui-content-path` ((#\_ui\_content\_path)) - This flag provides the option
+ to change the path the Consul UI loads from and will be displayed in the browser.
+ By default, the path is `/ui/`, for example `http://localhost:8500/ui/`. Only alphanumerics,
+ `-`, and `_` are allowed in a custom path. `/v1/` is not allowed as it would overwrite
+ the API endpoint.
+
+
+
+[go-sockaddr]: https://godoc.org/github.com/hashicorp/go-sockaddr/template
diff --git a/website/content/commands/catalog/datacenters.mdx b/website/content/commands/catalog/datacenters.mdx
index c9b70f5ecc3a..f34f556ffb31 100644
--- a/website/content/commands/catalog/datacenters.mdx
+++ b/website/content/commands/catalog/datacenters.mdx
@@ -38,6 +38,6 @@ Usage: `consul catalog datacenters [options]`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
diff --git a/website/content/commands/catalog/nodes.mdx b/website/content/commands/catalog/nodes.mdx
index 6efa3ad0716c..6fbc34d18030 100644
--- a/website/content/commands/catalog/nodes.mdx
+++ b/website/content/commands/catalog/nodes.mdx
@@ -85,10 +85,10 @@ Usage: `consul catalog nodes [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
diff --git a/website/content/commands/catalog/services.mdx b/website/content/commands/catalog/services.mdx
index a57adcb4a927..eabde39e1887 100644
--- a/website/content/commands/catalog/services.mdx
+++ b/website/content/commands/catalog/services.mdx
@@ -69,12 +69,12 @@ Usage: `consul catalog services [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
diff --git a/website/content/commands/config/delete.mdx b/website/content/commands/config/delete.mdx
index 085b0e34c411..c68f8baa987e 100644
--- a/website/content/commands/config/delete.mdx
+++ b/website/content/commands/config/delete.mdx
@@ -12,7 +12,7 @@ Command: `consul config delete`
Corresponding HTTP API Endpoint: [\[DELETE\] /v1/config/:kind/:name](/consul/api-docs/config#delete-configuration)
The `config delete` command deletes the configuration entry specified by the
-kind and name. See the [configuration entries docs](/consul/docs/agent/config-entries)
+kind and name. See the [configuration entries docs](/consul/docs/fundamentals/config-entry)
for more details about configuration entries.
The table below shows this command's [required ACLs](/consul/api-docs/api-structure#authentication). Configuration of
@@ -59,13 +59,13 @@ config entry. This is used in combination with the -cas flag.
#### Enterprise Options
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/config/index.mdx b/website/content/commands/config/index.mdx
index f485cef75e33..59b79a001542 100644
--- a/website/content/commands/config/index.mdx
+++ b/website/content/commands/config/index.mdx
@@ -12,9 +12,9 @@ Command: `consul config`
The `config` command is used to interact with Consul's central configuration
system. It exposes commands for creating, updating, reading, and deleting
different kinds of config entries. See the
-[agent configuration](/consul/docs/agent/config/config-files#enable_central_service_config)
+[agent configuration](/consul/docs/reference/agent/configuration-file/general#enable_central_service_config)
for more information on how to enable this functionality for centrally
-configuring services and [configuration entries docs](/consul/docs/agent/config-entries) for a description
+configuring services and [configuration entries docs](/consul/docs/fundamentals/config-entry) for a description
of the configuration entries content.
## Usage
diff --git a/website/content/commands/config/list.mdx b/website/content/commands/config/list.mdx
index 5f55a7bcff72..c3ac3d64d19a 100644
--- a/website/content/commands/config/list.mdx
+++ b/website/content/commands/config/list.mdx
@@ -12,7 +12,7 @@ Command: `consul config list`
Corresponding HTTP API Endpoint: [\[GET\] /v1/config/:kind](/consul/api-docs/config#list-configurations)
The `config list` command lists all given config entries of the given kind.
-See the [configuration entries docs](/consul/docs/agent/config-entries) for more
+See the [configuration entries docs](/consul/docs/fundamentals/config-entry) for more
details about configuration entries.
The table below shows this command's [required ACLs](/consul/api-docs/api-structure#authentication). Configuration of
@@ -48,13 +48,13 @@ Usage: `consul config list [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/config/read.mdx b/website/content/commands/config/read.mdx
index c54a60dfec16..ff17a6f63351 100644
--- a/website/content/commands/config/read.mdx
+++ b/website/content/commands/config/read.mdx
@@ -13,7 +13,7 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/config/:kind/:name](/consul/api-do
The `config read` command reads the config entry specified by the given
kind and name and outputs its JSON representation. See the
-[configuration entries docs](/consul/docs/agent/config-entries) for more
+[configuration entries docs](/consul/docs/fundamentals/config-entry) for more
details about configuration entries.
The table below shows this command's [required ACLs](/consul/api-docs/api-structure#authentication). Configuration of
@@ -52,13 +52,13 @@ Usage: `consul config read [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/config/write.mdx b/website/content/commands/config/write.mdx
index f466654c617f..3419dd98f3b2 100644
--- a/website/content/commands/config/write.mdx
+++ b/website/content/commands/config/write.mdx
@@ -12,7 +12,7 @@ Command: `consul config write`
Corresponding HTTP API Endpoint: [\[PUT\] /v1/config](/consul/api-docs/config#apply-configuration)
The `config write` command creates or updates a centralized config entry.
-See the [configuration entries docs](/consul/docs/agent/config-entries) for more
+See the [configuration entries docs](/consul/docs/fundamentals/config-entry) for more
details about configuration entries.
The table below shows this command's [required ACLs](/consul/api-docs/api-structure#authentication). Configuration of
@@ -53,13 +53,13 @@ Usage: `consul config write [options] FILE`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
@@ -74,7 +74,7 @@ From stdin:
### Config Entry examples
All config entries must have a `Kind` when registered. See
-[Service Mesh - Config Entries](/consul/docs/connect/config-entries) for the list of
+[Service Mesh - Config Entries](/consul/docs/fundamentals/config-entry) for the list of
supported config entries.
#### Service defaults
@@ -91,7 +91,7 @@ service use the `http` protocol.
}
```
-For more information, refer to the [service defaults configuration reference](/consul/docs/connect/config-entries/service-defaults).
+For more information, refer to the [service defaults configuration reference](/consul/docs/reference/config-entry/service-defaults).
#### Proxy defaults
@@ -110,4 +110,4 @@ Envoy proxies.
}
```
-For more information, refer to the [proxy defaults configuration reference](/consul/docs/connect/config-entries/proxy-defaults).
+For more information, refer to the [proxy defaults configuration reference](/consul/docs/reference/config-entry/proxy-defaults).
diff --git a/website/content/commands/connect/ca.mdx b/website/content/commands/connect/ca.mdx
index 350d18e6d318..a16224d04e31 100644
--- a/website/content/commands/connect/ca.mdx
+++ b/website/content/commands/connect/ca.mdx
@@ -13,7 +13,7 @@ Command: `consul connect ca`
This command is used to interact with Consul service mesh's Certificate Authority
managed by the connect subsystem.
It can be used to view or modify the current CA configuration. Refer to the
-[service mesh CA documentation](/consul/docs/connect/ca) for more information.
+[service mesh CA documentation](/consul/docs/secure-mesh/certificate) for more information.
```text
Usage: consul connect ca [options] [args]
@@ -57,9 +57,9 @@ Corresponding HTTP API Endpoint: [\[GET\] /v1/connect/ca/configuration](/consul/
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
The output looks like this:
@@ -123,6 +123,6 @@ The return code will indicate success or failure.
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
diff --git a/website/content/commands/connect/envoy.mdx b/website/content/commands/connect/envoy.mdx
index 913adb981dff..8b883bfd510b 100644
--- a/website/content/commands/connect/envoy.mdx
+++ b/website/content/commands/connect/envoy.mdx
@@ -36,7 +36,7 @@ Usage: `consul connect envoy [options] [-- pass-through options]`
#### Envoy Options for both Sidecars and Gateways
-- `-proxy-id` - The [proxy service](/consul/docs/connect/proxies/proxy-config-reference) ID.
+- `-proxy-id` - The [proxy service](/consul/docs/reference/proxy/connect-proxy) ID.
This service ID must already be registered with the local agent unless a gateway is being
registered with the `-register` flag. As of Consul 1.8.0, this can also be
specified via the `CONNECT_PROXY_ID` environment variable.
@@ -56,7 +56,7 @@ Usage: `consul connect envoy [options] [-- pass-through options]`
ACL token from `-token` or the environment and so should be handled as a secret.
This token grants the identity of any service it has `service:write` permission
for and so can be used to access any upstream service that that service is
- allowed to access by [service mesh intentions](/consul/docs/connect/intentions).
+ allowed to access by [service mesh intentions](/consul/docs/secure-mesh/intention).
- `-envoy-version` - The version of envoy that is being started. Default is
`1.23.1`. This is required so that the correct configuration can be generated.
@@ -167,14 +167,14 @@ compatibility with Envoy and prevent potential issues. Default is `false`.
If Envoy is configured as an ingress gateway, Consul instantiates a `/ready` HTTP endpoint at the specified IP and port. Consul uses `/ready` HTTP endpoints
to check gateway health. Ingress gateways also use the specified IP when instantiating user-defined listeners configured in the
- [ingress gateway configuration entry](/consul/docs/connect/config-entries/ingress-gateway).
+ [ingress gateway configuration entry](/consul/docs/reference/config-entry/ingress-gateway).
~> **Note**: Ensure that user-defined ingress gateway listeners use a
different port than the port specified in `-address` so that they do not
conflict with the health check endpoint.
- `-admin-access-log-path` -
- **Deprecated in Consul 1.15.0 in favor of [`proxy-defaults` access logs](/consul/docs/connect/config-entries/proxy-defaults#accesslogs).**
+ **Deprecated in Consul 1.15.0 in favor of [`proxy-defaults` access logs](/consul/docs/reference/config-entry/proxy-defaults#accesslogs).**
The path to write the access log for the administration
server. If no access log is desired specify `/dev/null`. By default it will
use `/dev/null`.
@@ -199,9 +199,9 @@ compatibility with Envoy and prevent potential issues. Default is `false`.
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
@@ -215,19 +215,19 @@ proxy configuration needed.
be used instead. The scheme can also be set to HTTPS by setting the
environment variable CONSUL_HTTP_SSL=true. This may be a unix domain socket
using `unix:///path/to/socket` if the [agent is configured to
- listen](/consul/docs/agent/config/config-files#addresses) that way.
+ listen](/consul/docs/reference/agent/configuration-file/general#addresses) that way.
-> **Note:** gRPC uses the same TLS
settings as the HTTPS API. If HTTPS is enabled then gRPC will require HTTPS
as well.
- @include 'http_api_options_client.mdx'
+ @include 'legacy/http_api_options_client.mdx'
## Examples
In the following examples, a local service instance is registered on the local agent with a
sidecar proxy (using the [sidecar service
-registration](/consul/docs/connect/proxies/deploy-sidecar-services) helper):
+registration](/consul/docs/connect/proxy/sidecar) helper):
```hcl
service {
diff --git a/website/content/commands/connect/expose.mdx b/website/content/commands/connect/expose.mdx
index 79be1e0e60ae..5f6f67c7f475 100644
--- a/website/content/commands/connect/expose.mdx
+++ b/website/content/commands/connect/expose.mdx
@@ -14,7 +14,7 @@ Command: `consul connect expose`
The connect expose subcommand is used to expose a mesh-enabled service
through an Ingress gateway by modifying the gateway's configuration and adding
an intention to allow traffic from the gateway to the service. See the
-[Ingress gateway documentation](/consul/docs/connect/gateways/ingress-gateway) for more information
+[Ingress gateway documentation](/consul/docs/north-south/ingress-gateway) for more information
about Ingress gateways.
```text
@@ -46,9 +46,9 @@ Usage: consul connect expose [options]
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/connect/proxy.mdx b/website/content/commands/connect/proxy.mdx
index 0a4571df5f9e..d073a87a3235 100644
--- a/website/content/commands/connect/proxy.mdx
+++ b/website/content/commands/connect/proxy.mdx
@@ -24,14 +24,14 @@ Usage: `consul connect proxy [options]`
- `-sidecar-for` - The _ID_ (not name if they differ) of the service instance
this proxy will represent. The target service doesn't need to exist on the
local agent yet but a [sidecar proxy
- registration](/consul/docs/connect/proxies/deploy-sidecar-services) with
+ registration](/consul/docs/connect/proxy/sidecar) with
`proxy.destination_service_id` equal to the passed value must be present. If
multiple proxy registrations targeting the same local service instance are
present the command will error and `-proxy-id` should be used instead.
This can also be specified via the `CONNECT_SIDECAR_FOR` environment variable.
- `-proxy-id` - The [proxy
- service](/consul/docs/connect/proxies/proxy-config-reference) ID on the
+ service](/consul/docs/reference/proxy/connect-proxy) ID on the
local agent. This must already be present on the local agent. This option
can also be specified via the `CONNECT_PROXY_ID` environment variable.
@@ -44,7 +44,7 @@ Usage: `consul connect proxy [options]`
doesn't need to actually exist in the Consul catalog, but proper ACL
permissions (`service:write`) are required. This and the remaining options can
be used to setup a proxy that is not registered already with local config
- [useful for development](/consul/docs/connect/dev).
+ [useful for development](/consul/docs/troubleshoot/mesh).
- `-upstream` - Upstream service to support connecting to. The format should be
'name:addr', such as 'db:8181'. This will make 'db' available on port 8181.
@@ -72,9 +72,9 @@ Usage: `consul connect proxy [options]`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/connect/redirect-traffic.mdx b/website/content/commands/connect/redirect-traffic.mdx
index d59fe7928cf6..8fd968ba0d2f 100644
--- a/website/content/commands/connect/redirect-traffic.mdx
+++ b/website/content/commands/connect/redirect-traffic.mdx
@@ -38,7 +38,7 @@ Usage: `consul connect redirect-traffic [options]`
- `-consul-dns-port` - The port of the Consul DNS resolver. If provided, DNS queries will be redirected to the provided IP address for name resolution.
-- `-proxy-id` - The [proxy service](/consul/docs/connect/proxies/proxy-config-reference) ID.
+- `-proxy-id` - The [proxy service](/consul/docs/reference/proxy/connect-proxy) ID.
This service ID must already be registered with the local agent.
- `-proxy-inbound-port` - The inbound port that the proxy is listening on.
@@ -60,13 +60,13 @@ Usage: `consul connect redirect-traffic [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/debug.mdx b/website/content/commands/debug.mdx
index 6feeb685f452..f78688e1328d 100644
--- a/website/content/commands/debug.mdx
+++ b/website/content/commands/debug.mdx
@@ -69,7 +69,7 @@ all targets for 5 minutes.
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Capture Targets
@@ -83,7 +83,7 @@ information when `debug` is running. By default, it captures all information.
| `members` | A list of all the WAN and LAN members in the cluster. |
| `metrics` | Metrics from the in-memory metrics endpoint in the target, captured at the interval. |
| `logs` | `TRACE` level logs for the target agent, captured for the duration. |
-| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU and traces are captured for `duration` in a single file while heap and goroutine are separate snapshots for each `interval`. This information is not retrieved unless [`enable_debug`](/consul/docs/agent/config/config-files#enable_debug) is set to `true` on the target agent or ACLs are enabled and an ACL token with `operator:read` is provided. |
+| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU and traces are captured for `duration` in a single file while heap and goroutine are separate snapshots for each `interval`. This information is not retrieved unless [`enable_debug`](/consul/docs/reference/agent/configuration-file/general#enable_debug) is set to `true` on the target agent or ACLs are enabled and an ACL token with `operator:read` is provided. |
## Examples
diff --git a/website/content/commands/event.mdx b/website/content/commands/event.mdx
index d89b71d68c4a..086ff05aa462 100644
--- a/website/content/commands/event.mdx
+++ b/website/content/commands/event.mdx
@@ -19,14 +19,14 @@ The `event` command provides a mechanism to fire a custom user event to an
entire datacenter. These events are opaque to Consul, but they can be used
to build scripting infrastructure to do automated deploys, restart services,
or perform any other orchestration action. Events can be handled by
-[using a watch](/consul/docs/dynamic-app-config/watches).
+[using a watch](/consul/docs/automate/watch).
-Under the hood, events are propagated using the [gossip protocol](/consul/docs/architecture/gossip).
+Under the hood, events are propagated using the [gossip protocol](/consul/docs/concept/gossip).
While the details are not important for using events, an understanding of
the semantics is useful. The gossip layer will make a best-effort to deliver
the event, but there is **no guaranteed delivery**. Unlike most Consul data, which is
-replicated using [consensus](/consul/docs/architecture/consensus), event data
+replicated using [consensus](/consul/docs/concept/consensus), event data
is purely peer-to-peer over gossip. This means it is not persisted and does
not have a total ordering. In practice, this means you cannot rely on the
order of message delivery. An advantage however is that events can still
@@ -66,6 +66,6 @@ payload can be provided as the final argument.
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
diff --git a/website/content/commands/exec.mdx b/website/content/commands/exec.mdx
index 07754bd2b938..16e2451e46ce 100644
--- a/website/content/commands/exec.mdx
+++ b/website/content/commands/exec.mdx
@@ -17,7 +17,7 @@ the `web` service.
Remote execution works by specifying a job, which is stored in the KV store.
Agents are informed about the new job using the [event system](/consul/commands/event),
-which propagates messages via the [gossip protocol](/consul/docs/architecture/gossip).
+which propagates messages via the [gossip protocol](/consul/docs/concept/gossip).
As a result, delivery is best-effort, and there is **no guarantee** of execution.
While events are purely gossip driven, remote execution relies on the KV store
@@ -78,6 +78,6 @@ completion as a script to evaluate.
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
diff --git a/website/content/commands/force-leave.mdx b/website/content/commands/force-leave.mdx
index 0af9218f233d..c71d49b7d2fc 100644
--- a/website/content/commands/force-leave.mdx
+++ b/website/content/commands/force-leave.mdx
@@ -46,7 +46,7 @@ Usage: `consul force-leave [options] node`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/index.mdx b/website/content/commands/index.mdx
index a3e500811034..f7d1ff2f49f0 100644
--- a/website/content/commands/index.mdx
+++ b/website/content/commands/index.mdx
@@ -95,7 +95,7 @@ Command Options
## Authentication
-When the [ACL system is enabled](/consul/docs/agent/config/config-files#acl) the Consul CLI will
+When the [ACL system is enabled](/consul/docs/reference/agent/configuration-file/acl) the Consul CLI will
require an [ACL token](/consul/docs/security/acl#tokens) to perform API requests.
The ACL token can be provided directly on the command line using the `-token` command line flag,
@@ -242,8 +242,8 @@ CONSUL_TLS_SERVER_NAME=consulserver.domain
Like [`CONSUL_HTTP_ADDR`](#consul_http_addr) but configures the address the
local agent is listening for gRPC requests. Currently gRPC is only used for
-integrating [Envoy proxy](/consul/docs/connect/proxies/envoy) and must be [enabled
-explicitly](/consul/docs/agent/config/config-files#grpc_port) in agent configuration.
+integrating [Envoy proxy](/consul/docs/reference/proxy/envoy) and must be [enabled
+explicitly](/consul/docs/reference/agent/configuration-file/general#grpc_port) in agent configuration.
```
CONSUL_GRPC_ADDR=127.0.0.1:8502
diff --git a/website/content/commands/info.mdx b/website/content/commands/info.mdx
index 0a74422f5806..08eec1a3e26e 100644
--- a/website/content/commands/info.mdx
+++ b/website/content/commands/info.mdx
@@ -19,9 +19,9 @@ There are currently the top-level keys for:
- agent: Provides information about the agent
- consul: Information about the consul library (client or server)
-- raft: Provides info about the Raft [consensus library](/consul/docs/architecture/consensus)
-- serf_lan: Provides info about the LAN [gossip pool](/consul/docs/architecture/gossip)
-- serf_wan: Provides info about the WAN [gossip pool](/consul/docs/architecture/gossip)
+- raft: Provides info about the Raft [consensus library](/consul/docs/concept/consensus)
+- serf_lan: Provides info about the LAN [gossip pool](/consul/docs/concept/gossip)
+- serf_wan: Provides info about the WAN [gossip pool](/consul/docs/concept/gossip)
Here is an example output:
@@ -75,4 +75,4 @@ Usage: `consul info`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
diff --git a/website/content/commands/intention/check.mdx b/website/content/commands/intention/check.mdx
index 23ae56ceae4e..13a85510d64e 100644
--- a/website/content/commands/intention/check.mdx
+++ b/website/content/commands/intention/check.mdx
@@ -41,13 +41,13 @@ Usage: `consul intention check [options] SRC DST`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/intention/create.mdx b/website/content/commands/intention/create.mdx
index bbaccc2be2f4..c72241c9f918 100644
--- a/website/content/commands/intention/create.mdx
+++ b/website/content/commands/intention/create.mdx
@@ -10,7 +10,7 @@ description: >-
-> **Deprecated** - This command is deprecated in Consul 1.9.0 in favor of
using the [config entry CLI command](/consul/commands/config/write). To create an
intention, create or modify a
-[`service-intentions`](/consul/docs/connect/config-entries/service-intentions) config
+[`service-intentions`](/consul/docs/reference/config-entry/service-intentions) config
entry for the destination.
Command: `consul intention create`
@@ -52,13 +52,13 @@ are not supported from commands, but may be from the corresponding HTTP endpoint
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/intention/delete.mdx b/website/content/commands/intention/delete.mdx
index 2cfb97a966db..55b46a371764 100644
--- a/website/content/commands/intention/delete.mdx
+++ b/website/content/commands/intention/delete.mdx
@@ -23,7 +23,7 @@ are not supported from commands, but may be from the corresponding HTTP endpoint
-> **Deprecated** - The one argument form of this command is deprecated in
Consul 1.9.0. Intentions no longer need IDs when represented as
-[`service-intentions`](/consul/docs/connect/config-entries/service-intentions) config
+[`service-intentions`](/consul/docs/reference/config-entry/service-intentions) config
entries.
## Usage
@@ -37,13 +37,13 @@ Usage:
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/intention/get.mdx b/website/content/commands/intention/get.mdx
index b3db133fd44e..17c5c2112a35 100644
--- a/website/content/commands/intention/get.mdx
+++ b/website/content/commands/intention/get.mdx
@@ -15,7 +15,7 @@ The `intention get` command shows a single intention.
-> **Deprecated** - The one argument form of this command is deprecated in
Consul 1.9.0. Intentions no longer need IDs when represented as
-[`service-intentions`](/consul/docs/connect/config-entries/service-intentions) config
+[`service-intentions`](/consul/docs/reference/config-entry/service-intentions) config
entries.
The table below shows this command's [required ACLs](/consul/api-docs/api-structure#authentication). Configuration of
@@ -37,13 +37,13 @@ Usage:
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/intention/index.mdx b/website/content/commands/intention/index.mdx
index f7ea711a27fb..bfdb319c789a 100644
--- a/website/content/commands/intention/index.mdx
+++ b/website/content/commands/intention/index.mdx
@@ -10,18 +10,18 @@ description: >-
Command: `consul intention`
The `intention` command is used to interact with service mesh
-[intentions](/consul/docs/connect/intentions). It exposes commands for
+[intentions](/consul/docs/secure-mesh/intention). It exposes commands for
creating, updating, reading, deleting, checking, and managing intentions.
This command is available in Consul 1.2 and later.
Use the
-[`service-intentions`](/consul/docs/connect/config-entries/service-intentions) configuration entry or the [HTTP
+[`service-intentions`](/consul/docs/reference/config-entry/service-intentions) configuration entry or the [HTTP
API](/consul/api-docs/connect/intentions) to manage intentions.
~> **Deprecated** - This command is deprecated in Consul 1.9.0 in favor of
using the [config entry CLI command](/consul/commands/config/write). To create an
intention, create or modify a
-[`service-intentions`](/consul/docs/connect/config-entries/service-intentions) config
+[`service-intentions`](/consul/docs/reference/config-entry/service-intentions) config
entry for the destination.
## Usage
diff --git a/website/content/commands/intention/list.mdx b/website/content/commands/intention/list.mdx
index 4cdfa5c8a2f7..9fc25c19fca3 100644
--- a/website/content/commands/intention/list.mdx
+++ b/website/content/commands/intention/list.mdx
@@ -29,11 +29,11 @@ Usage:
#### Enterprise Options
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/intention/match.mdx b/website/content/commands/intention/match.mdx
index 3d94939b38bf..8ec41ff21d6a 100644
--- a/website/content/commands/intention/match.mdx
+++ b/website/content/commands/intention/match.mdx
@@ -40,13 +40,13 @@ Usage: `consul intention match [options] SRC_OR_DST`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/join.mdx b/website/content/commands/join.mdx
index 7bb438911d4a..4d33f74b1c1c 100644
--- a/website/content/commands/join.mdx
+++ b/website/content/commands/join.mdx
@@ -46,4 +46,4 @@ command will fail only if Consul was unable to join any of the specified address
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
diff --git a/website/content/commands/keygen.mdx b/website/content/commands/keygen.mdx
index c512eeef53f4..df2e0e89ad4e 100644
--- a/website/content/commands/keygen.mdx
+++ b/website/content/commands/keygen.mdx
@@ -12,6 +12,6 @@ description: >-
Command: `consul keygen`
The `keygen` command generates an encryption key that can be used for
-[Consul agent traffic encryption](/consul/docs/security/encryption).
+[Consul agent traffic encryption](/consul/docs/secure/encryption).
The keygen command uses a cryptographically
strong pseudo-random number generator to generate the key.
diff --git a/website/content/commands/keyring.mdx b/website/content/commands/keyring.mdx
index b1c914fddc24..6c6e5aa43f31 100644
--- a/website/content/commands/keyring.mdx
+++ b/website/content/commands/keyring.mdx
@@ -12,7 +12,7 @@ Command: `consul keyring`
Corresponding HTTP API Endpoints: [\[VARIES\] /v1/operator/keyring](/consul/api-docs/operator/keyring)
The `keyring` command is used to examine and modify the encryption keys used in
-Consul's [Gossip Pools](/consul/docs/architecture/gossip). It is capable of
+Consul's [Gossip Pools](/consul/docs/concept/gossip). It is capable of
distributing new encryption keys to the cluster, retiring old encryption keys,
and changing the keys used by the cluster to encrypt messages.
@@ -76,7 +76,7 @@ Only one actionable argument may be specified per run, including `-list`,
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Output
diff --git a/website/content/commands/kv/delete.mdx b/website/content/commands/kv/delete.mdx
index 17216b0ca78b..3f9696146f8f 100644
--- a/website/content/commands/kv/delete.mdx
+++ b/website/content/commands/kv/delete.mdx
@@ -40,15 +40,15 @@ Usage: `consul kv delete [options] KEY_OR_PREFIX`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/kv/export.mdx b/website/content/commands/kv/export.mdx
index c05b2f80f895..0f44dc7218a3 100644
--- a/website/content/commands/kv/export.mdx
+++ b/website/content/commands/kv/export.mdx
@@ -28,15 +28,15 @@ Usage: `consul kv export [options] [PREFIX]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/kv/get.mdx b/website/content/commands/kv/get.mdx
index 6534fe35486b..1e0830043184 100644
--- a/website/content/commands/kv/get.mdx
+++ b/website/content/commands/kv/get.mdx
@@ -56,15 +56,15 @@ Usage: `consul kv get [options] [KEY_OR_PREFIX]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/kv/import.mdx b/website/content/commands/kv/import.mdx
index a960b1b738ce..550a247925e4 100644
--- a/website/content/commands/kv/import.mdx
+++ b/website/content/commands/kv/import.mdx
@@ -31,15 +31,15 @@ Usage: `consul kv import [options] [DATA]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/kv/put.mdx b/website/content/commands/kv/put.mdx
index dcde811fc01c..42026ede1ebd 100644
--- a/website/content/commands/kv/put.mdx
+++ b/website/content/commands/kv/put.mdx
@@ -59,15 +59,15 @@ Usage: `consul kv put [options] KEY [DATA]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/leave.mdx b/website/content/commands/leave.mdx
index 8c3744cc287c..1d63288b4c8a 100644
--- a/website/content/commands/leave.mdx
+++ b/website/content/commands/leave.mdx
@@ -24,7 +24,7 @@ non-graceful leave can affect cluster availability.
Depending on how many Consul servers are running, running `consul leave` on a server explicitly can reduce the quorum
size (which is derived from the number of Consul servers, see
-[deployment_table](/consul/docs/architecture/consensus#deployment_table)).
+[deployment_table](/consul/docs/concept/reliability#deployment-size)).
Even if the cluster used `bootstrap_expect` to set a number of servers and thus quorum size initially,
issuing `consul leave` on a server will reconfigure the cluster to have fewer servers.
This means you could end up with just one server that is still able to commit writes because the quorum size for
@@ -44,4 +44,4 @@ Usage: `consul leave [options]`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
diff --git a/website/content/commands/license.mdx b/website/content/commands/license.mdx
index 762e66df43d8..27495050d5e4 100644
--- a/website/content/commands/license.mdx
+++ b/website/content/commands/license.mdx
@@ -15,7 +15,7 @@ Command: `consul license`
The `license` command provides a list of all datacenters that use the Consul Enterprise license applied to the current datacenter.
~> **Warning**: Consul 1.10.0 removed the ability to set and reset the license using the CLI.
-See the [licensing documentation](/consul/docs/enterprise/license/overview) for more information about
+See the [licensing documentation](/consul/docs/enterprise/license) for more information about
Consul Enterprise license management.
If ACLs are enabled then a token with operator privileges may be required in
@@ -159,9 +159,9 @@ Licensed Features:
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## get
@@ -200,9 +200,9 @@ Licensed Features:
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## reset
@@ -245,6 +245,6 @@ Licensed Features:
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
diff --git a/website/content/commands/lock.mdx b/website/content/commands/lock.mdx
index 85a15680516f..4c970019706f 100644
--- a/website/content/commands/lock.mdx
+++ b/website/content/commands/lock.mdx
@@ -18,7 +18,7 @@ communication is disrupted, the child process is terminated.
The number of lock holders is configurable with the `-n` flag. By default,
a single holder is allowed, and a lock is used for mutual exclusion. This
-uses the [leader election algorithm](/consul/docs/dynamic-app-config/sessions/application-leader-election).
+uses the [leader election algorithm](/consul/docs/automate/application-leader-election).
If the lock holder count is more than one, then a semaphore is used instead.
A semaphore allows more than a single holder, but this is less efficient than
@@ -80,9 +80,43 @@ Windows has no POSIX compatible notion for `SIGTERM`.
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
+
+## Node health checks and TTL behavior
+
+When you run `consul lock`, Consul automatically creates an _ephemeral session_ that attaches one or more node checks, such as the `serfHealth` check, by default. These checks keep the node “healthy” from Consul’s perspective. This session automatically renews as long as the agent passes these health checks. For sessions with a a TTL configured, that TTL never reaches zero as long as the node remains healthy.
+
+This design ensures the lock is not lost as long as the local Consul agent is up and healthy. However, emphemeral sessions run indefinitely when:
+
+- **The node remains healthy**, including in partial networks where at least one Consul server still reads the node as online.
+- **No explicit session destroy** or forced release occurs.
+
+To strictly enforce the TTL and prevent auto-renewed by node checks, you must create or manage your own session separately. In that scenario:
+
+1. **Manually create a session** with the HTTP API that either excludes node checks or uses custom service checks.
+1. **Acquire the lock** on that custom session using the raw KV API (`?acquire=`).
+1. **Manage renewals** and releases yourself as needed.
+
+When you remove node checks, the TTL-based session expires after the specified time if you do not renew it. Consul releases the lock automatically when the session expires.
+
+## Node health checks and TTL behavior
+
+When you run `consul lock`, Consul automatically creates an _ephemeral session_ that attaches one or more node checks, such as the `serfHealth` check, by default. These checks keep the node “healthy” from Consul’s perspective. This session automatically renews as long as the agent passes these health checks. For sessions with a a TTL configured, that TTL never reaches zero as long as the node remains healthy.
+
+This design ensures the lock is not lost as long as the local Consul agent is up and healthy. However, emphemeral sessions run indefinitely when:
+
+- **The node remains healthy**, including in partial networks where at least one Consul server still reads the node as online.
+- **No explicit session destroy** or forced release occurs.
+
+To strictly enforce the TTL and prevent auto-renewed by node checks, you must create or manage your own session separately. In that scenario:
+
+1. **Manually create a session** with the HTTP API that either excludes node checks or uses custom service checks.
+1. **Acquire the lock** on that custom session using the raw KV API (`?acquire=`).
+1. **Manage renewals** and releases yourself as needed.
+
+When you remove node checks, the TTL-based session expires after the specified time if you do not renew it. Consul releases the lock automatically when the session expires.
## SHELL
diff --git a/website/content/commands/login.mdx b/website/content/commands/login.mdx
index b5ec3c11cb1f..98017e58a126 100644
--- a/website/content/commands/login.mdx
+++ b/website/content/commands/login.mdx
@@ -51,11 +51,11 @@ Usage: `consul login [options]`
- `-oidc-callback-listen-addr=` - The address to bind a webserver on to
handle the browser callback from the OIDC workflow. Added in Consul 1.8.0.
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
### Examples
diff --git a/website/content/commands/logout.mdx b/website/content/commands/logout.mdx
index 315393640465..5366fcc9d429 100644
--- a/website/content/commands/logout.mdx
+++ b/website/content/commands/logout.mdx
@@ -33,7 +33,7 @@ Usage: `consul logout [options]`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
### Examples
diff --git a/website/content/commands/maint.mdx b/website/content/commands/maint.mdx
index 387c3668211b..d76848425e3a 100644
--- a/website/content/commands/maint.mdx
+++ b/website/content/commands/maint.mdx
@@ -53,7 +53,7 @@ Usage: `consul maint [options]`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## List mode
diff --git a/website/content/commands/members.mdx b/website/content/commands/members.mdx
index 6299f21e4c82..a244601408ea 100644
--- a/website/content/commands/members.mdx
+++ b/website/content/commands/members.mdx
@@ -56,8 +56,8 @@ Usage: `consul members [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
diff --git a/website/content/commands/monitor.mdx b/website/content/commands/monitor.mdx
index b37635b57b4d..27b089a07758 100644
--- a/website/content/commands/monitor.mdx
+++ b/website/content/commands/monitor.mdx
@@ -34,4 +34,4 @@ Usage: `consul monitor [options]`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
diff --git a/website/content/commands/namespace/create.mdx b/website/content/commands/namespace/create.mdx
index 068666e5cafd..07a3541072d2 100644
--- a/website/content/commands/namespace/create.mdx
+++ b/website/content/commands/namespace/create.mdx
@@ -61,11 +61,11 @@ from the CLI arguments.
#### API Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/namespace/delete.mdx b/website/content/commands/namespace/delete.mdx
index adc235be6062..263757539c27 100644
--- a/website/content/commands/namespace/delete.mdx
+++ b/website/content/commands/namespace/delete.mdx
@@ -30,11 +30,11 @@ Usage: `consul namespace delete `
#### API Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/namespace/list.mdx b/website/content/commands/namespace/list.mdx
index 1b043ed657d6..8ebb7f268eed 100644
--- a/website/content/commands/namespace/list.mdx
+++ b/website/content/commands/namespace/list.mdx
@@ -42,11 +42,11 @@ Usage: `consul namespace list`
#### API Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/namespace/read.mdx b/website/content/commands/namespace/read.mdx
index 778f672ff5c8..89e7e8f6c78a 100644
--- a/website/content/commands/namespace/read.mdx
+++ b/website/content/commands/namespace/read.mdx
@@ -41,11 +41,11 @@ Usage: `consul namespace read `
#### API Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/namespace/update.mdx b/website/content/commands/namespace/update.mdx
index 4cce12235675..1cd65244c29c 100644
--- a/website/content/commands/namespace/update.mdx
+++ b/website/content/commands/namespace/update.mdx
@@ -68,11 +68,11 @@ with the existing namespace definition.
#### API Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/namespace/write.mdx b/website/content/commands/namespace/write.mdx
index f1a16ec006bd..7fcee48b8654 100644
--- a/website/content/commands/namespace/write.mdx
+++ b/website/content/commands/namespace/write.mdx
@@ -29,7 +29,7 @@ Usage: `consul namespace write `
The `` must either be a file path or `-` to indicate that
the definition should be read from stdin. The definition can be in either JSON
-or HCL format. See [here](/consul/docs/enterprise/namespaces#namespace-definition) for a description of the namespace definition.
+or HCL format. See [here](/consul/docs/multi-tenant/namespace#namespace-definition) for a description of the namespace definition.
#### Command Options
@@ -40,11 +40,11 @@ or HCL format. See [here](/consul/docs/enterprise/namespaces#namespace-definitio
#### API Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/operator/area.mdx b/website/content/commands/operator/area.mdx
index 0b5525c714b4..e155ea04f3e9 100644
--- a/website/content/commands/operator/area.mdx
+++ b/website/content/commands/operator/area.mdx
@@ -83,9 +83,9 @@ The return code indicates success or failure.
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## delete
@@ -121,9 +121,9 @@ The return code indicates success or failure.
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## join
@@ -166,9 +166,9 @@ The return code indicates success or failure.
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## list
@@ -204,9 +204,9 @@ The return code indicates success or failure.
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## members
@@ -269,9 +269,9 @@ The return code indicates success or failure.
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## update
@@ -310,6 +310,6 @@ The return code indicates success or failure.
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
\ No newline at end of file
+@include 'legacy/http_api_options_server.mdx'
\ No newline at end of file
diff --git a/website/content/commands/operator/autopilot.mdx b/website/content/commands/operator/autopilot.mdx
index 1b4f43409c83..8a1ac55759db 100644
--- a/website/content/commands/operator/autopilot.mdx
+++ b/website/content/commands/operator/autopilot.mdx
@@ -57,9 +57,9 @@ UpgradeMigrationTag = ""
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## set-config
@@ -107,17 +107,17 @@ The return code indicates success or failure.
- `-disable-upgrade-migration` - Controls whether Consul will avoid promoting
new servers until it can perform a migration. Must be one of `[true|false]`.
-- `-redundancy-zone-tag` - Controls the [`-node-meta`](/consul/docs/agent/config/cli-flags#_node_meta)
+- `-redundancy-zone-tag` - Controls the [`-node-meta`](/consul/commands/agent#_node_meta)
key name used for separating servers into different redundancy zones.
-- `-upgrade-version-tag` - Controls the [`-node-meta`](/consul/docs/agent/config/cli-flags#_node_meta)
+- `-upgrade-version-tag` - Controls the [`-node-meta`](/consul/commands/agent#_node_meta)
tag to use for version info when performing upgrade migrations. If left blank, the Consul version will be used.
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## state
@@ -137,9 +137,9 @@ Usage: `consul operator autopilot state [options]`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
#### Command Options
diff --git a/website/content/commands/operator/usage.mdx b/website/content/commands/operator/usage.mdx
index 83ae028f6c95..3cd5446e6c9d 100644
--- a/website/content/commands/operator/usage.mdx
+++ b/website/content/commands/operator/usage.mdx
@@ -105,7 +105,7 @@ Total 3
#### Command Options
-- `-all-datacenters` - Display service counts from all known datacenters.
+- `-all-datacenters` - Display service counts from all known federated datacenters.
Default is `false`.
- `-billable` - Display only billable service information. Default is `false`.
diff --git a/website/content/commands/partition.mdx b/website/content/commands/partition.mdx
index 660ea71ccaaf..3dbb0cb47a9e 100644
--- a/website/content/commands/partition.mdx
+++ b/website/content/commands/partition.mdx
@@ -5,7 +5,7 @@ description: |
The partition command enables you create and manage Consul Enterprise admin partitions.
---
-@include 'http_api_and_cli_characteristics_links.mdx'
+@include 'legacy/http_api_and_cli_characteristics_links.mdx'
# Consul Admin Partition
diff --git a/website/content/commands/peering/delete.mdx b/website/content/commands/peering/delete.mdx
index 1bd474dc306f..f913b49f59a0 100644
--- a/website/content/commands/peering/delete.mdx
+++ b/website/content/commands/peering/delete.mdx
@@ -34,11 +34,11 @@ Usage: `consul peering delete [options] -name `
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/peering/establish.mdx b/website/content/commands/peering/establish.mdx
index 782fb9cda681..894e39e04226 100644
--- a/website/content/commands/peering/establish.mdx
+++ b/website/content/commands/peering/establish.mdx
@@ -36,11 +36,11 @@ Usage: `consul peering establish [options] -name -peering-token `
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/peering/generate-token.mdx b/website/content/commands/peering/generate-token.mdx
index 6ce3fb059cb5..0a818866731b 100644
--- a/website/content/commands/peering/generate-token.mdx
+++ b/website/content/commands/peering/generate-token.mdx
@@ -43,11 +43,11 @@ You can specify one or more load balancers or external IPs that route external t
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/peering/list.mdx b/website/content/commands/peering/list.mdx
index 9838de3e7bc5..cbf44dc06131 100644
--- a/website/content/commands/peering/list.mdx
+++ b/website/content/commands/peering/list.mdx
@@ -30,11 +30,11 @@ Usage: `consul peering list [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/peering/read.mdx b/website/content/commands/peering/read.mdx
index 95a41b2701aa..14f5abb3cc5a 100644
--- a/website/content/commands/peering/read.mdx
+++ b/website/content/commands/peering/read.mdx
@@ -31,11 +31,11 @@ Usage: `consul peering read [options] -name `
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/reload.mdx b/website/content/commands/reload.mdx
index a7eaa45df3d2..b649bf6f61c4 100644
--- a/website/content/commands/reload.mdx
+++ b/website/content/commands/reload.mdx
@@ -39,4 +39,4 @@ Usage: `consul reload`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
diff --git a/website/content/commands/rtt.mdx b/website/content/commands/rtt.mdx
index 0fc2bd37abbd..2e67aa6499cd 100644
--- a/website/content/commands/rtt.mdx
+++ b/website/content/commands/rtt.mdx
@@ -69,4 +69,4 @@ The following environment variables control accessing the HTTP server via SSL:
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
diff --git a/website/content/commands/services/deregister.mdx b/website/content/commands/services/deregister.mdx
index 138de2e763eb..adaffcc5b4a5 100644
--- a/website/content/commands/services/deregister.mdx
+++ b/website/content/commands/services/deregister.mdx
@@ -16,7 +16,7 @@ Note that this command can only deregister services that were registered
with the agent specified and is intended to be paired with `services register`.
By default, the command deregisters services on the local agent.
-We recommend deregistering services registered with a configuration file by deleting the file and [reloading](/consul/commands/reload) Consul. Refer to [Services Overview](/consul/docs/services/services) for additional information about services.
+We recommend deregistering services registered with a configuration file by deleting the file and [reloading](/consul/commands/reload) Consul. Refer to [Services Overview](/consul/docs/fundamentals/service) for additional information about services.
The following table shows the [ACLs](/consul/api-docs/api-structure#authentication) required to run the `consul services deregister` command:
@@ -43,13 +43,13 @@ This flexibility makes it easy to pair the command with the
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/services/export.mdx b/website/content/commands/services/export.mdx
index 370f95dd6437..91e8e76a78dd 100644
--- a/website/content/commands/services/export.mdx
+++ b/website/content/commands/services/export.mdx
@@ -40,13 +40,13 @@ Usage: consul services export [options] -name -consumer-peers ` - A comma-separated list of admin partitions within the
same datacenter to export the service to. This flag is optional when `-consumer-peers` is specified.
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/services/exported-services.mdx b/website/content/commands/services/exported-services.mdx
index 395ad2cb8b30..d1fbcb0dfe04 100644
--- a/website/content/commands/services/exported-services.mdx
+++ b/website/content/commands/services/exported-services.mdx
@@ -11,7 +11,7 @@ Command: `consul services exported-services`
Corresponding HTTP API Endpoint: [\[GET\] /v1/exported-services](/consul/api-docs/exported-services)
-The `exported-services` command displays the services that were exported using an [`exported-services` configuration entry](/consul/docs/connect/config-entries/exported-services). Sameness groups and wildcards in the configuration entry are expanded in the response.
+The `exported-services` command displays the services that were exported using an [`exported-services` configuration entry](/consul/docs/reference/config-entry/exported-services). Sameness groups and wildcards in the configuration entry are expanded in the response.
The table below shows this command's [required ACLs](/consul/api-docs/api-structure#authentication).
@@ -32,11 +32,11 @@ Usage: `consul services exported-services [options]`
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/services/index.mdx b/website/content/commands/services/index.mdx
index f511ffe2efce..e70a9c906771 100644
--- a/website/content/commands/services/index.mdx
+++ b/website/content/commands/services/index.mdx
@@ -10,7 +10,7 @@ description: |
Command: `consul services`
The `services` command has subcommands for interacting with Consul services
-registered with the [local agent](/consul/docs/agent). These provide
+registered with the [local agent](/consul/docs/fundamentals/agent). These provide
useful commands such as `register` and `deregister` for easily registering
services in scripts, dev mode, etc.
To view all services in the catalog, instead of only agent-local services,
diff --git a/website/content/commands/services/register.mdx b/website/content/commands/services/register.mdx
index 01a09d19bfdb..e7977407b438 100644
--- a/website/content/commands/services/register.mdx
+++ b/website/content/commands/services/register.mdx
@@ -14,7 +14,7 @@ Corresponding HTTP API Endpoint: [\[PUT\] /v1/agent/service/register](/consul/ap
The `services register` command registers a service with the local agent.
This command returns after registration and must be paired with explicit
service deregistration. This command simplifies service registration from
-scripts. Refer to [Register Services and Health Checks](/consul/docs/services/usage/register-services-checks) for information about other service registration methods.
+scripts. Refer to [Register Services and Health Checks](/consul/docs/register/service/vm) for information about other service registration methods.
The following table shows the [ACLs](/consul/api-docs/api-structure#authentication) required to use the `consul services register` command:
@@ -57,7 +57,7 @@ The flags below should only be set if _no arguments_ are given. If no
arguments are given, the flags below can be used to register a single
service.
-The following fields specify identical parameters in a standard service definition file. Refer to [Services Configuration Reference](/consul/docs/services/configuration/services-configuration-reference) for details about each configuration option.
+The following fields specify identical parameters in a standard service definition file. Refer to [Services Configuration Reference](/consul/docs/reference/service) for details about each configuration option.
- `-id` - The ID of the service. This will default to `-name` if not set.
@@ -78,13 +78,13 @@ The following fields specify identical parameters in a standard service definiti
#### Enterprise Options
-@include 'cli-http-api-partition-options.mdx'
+@include 'legacy/cli-http-api-partition-options.mdx'
-@include 'http_api_namespace_options.mdx'
+@include 'legacy/http_api_namespace_options.mdx'
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
diff --git a/website/content/commands/snapshot/agent.mdx b/website/content/commands/snapshot/agent.mdx
index e86c62809e48..d374c343afb6 100644
--- a/website/content/commands/snapshot/agent.mdx
+++ b/website/content/commands/snapshot/agent.mdx
@@ -396,9 +396,16 @@ These options cannot be used when using `backup_destinations` in a config file.
- `-azure-blob-container-name` - Container to use. Required for Azure blob storage, and setting this
disables local storage.
-* `-azure-blob-environment` - Environment to use. Defaults to AZUREPUBLICCLOUD. Other valid environments
+- `-azure-blob-environment` - Environment to use. Defaults to AZUREPUBLICCLOUD. Other valid environments
are AZURECHINACLOUD, AZUREGERMANCLOUD and AZUREUSGOVERNMENTCLOUD. Introduced in Consul 1.7.3.
+~> The following options were introduced in v1.20.1+ent.
+
+- `-azure-blob-service-principal-id` - The ID of the service principal that owns the blob object.
+- `-azure-blob-service-principal-secret` - The secret of the service principal that owns the blob object.
+
+- `-azure-blob-tenant-id` - The ID of the tenant that owns the Azure blob.
+
#### Google Cloud Storage options
~> This option is deprecated when used with a top-level `google_storage` object in a config file. Use `snapshot_agent -> backup_destinations -> google_storage[0]` in a config file instead.
@@ -420,7 +427,7 @@ This integration needs the following information:
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
## Examples
@@ -459,5 +466,5 @@ then the order of precedence is as follows:
3. `license_path` configuration.
The ability to load licenses from the configuration or environment was added in v1.10.0,
-v1.9.7 and v1.8.13. See the [licensing documentation](/consul/docs/enterprise/license/overview) for
+v1.9.7 and v1.8.13. See the [licensing documentation](/consul/docs/enterprise/license) for
more information about Consul Enterprise license management.
diff --git a/website/content/commands/snapshot/restore.mdx b/website/content/commands/snapshot/restore.mdx
index e33827f48c1a..034f5669c422 100644
--- a/website/content/commands/snapshot/restore.mdx
+++ b/website/content/commands/snapshot/restore.mdx
@@ -36,9 +36,9 @@ Usage: `consul snapshot restore [options] FILE`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
## Examples
diff --git a/website/content/commands/snapshot/save.mdx b/website/content/commands/snapshot/save.mdx
index cf77cd48a695..1427bcebf421 100644
--- a/website/content/commands/snapshot/save.mdx
+++ b/website/content/commands/snapshot/save.mdx
@@ -45,9 +45,9 @@ Usage: `consul snapshot save [options] FILE`
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
- `-append-filename=` - Value can be - version,dc,node,status
Adds consul version, datacenter name, node name, and status (leader/follower)
diff --git a/website/content/commands/tls/ca.mdx b/website/content/commands/tls/ca.mdx
index f0ec37018c58..3e6afe55295d 100644
--- a/website/content/commands/tls/ca.mdx
+++ b/website/content/commands/tls/ca.mdx
@@ -41,7 +41,7 @@ Usage: `consul tls ca create [options]`
- `-days=` - Number of days the CA is valid for. Defaults to 1825 days (approximately 5 years).
-- `-domain=` - The DNS domain of the Consul cluster that agents are [configured](/consul/docs/agent/config/cli-flags#_domain) with.
+- `-domain=` - The DNS domain of the Consul cluster that agents are [configured](/consul/commands/agent#_domain) with.
Defaults to `consul`. Only used when `-name-constraint` is set.
Additional domains can be passed with `-additional-name-constraint`.
diff --git a/website/content/commands/troubleshoot/index.mdx b/website/content/commands/troubleshoot/index.mdx
index 74d9d9cec32a..a681e20bcccf 100644
--- a/website/content/commands/troubleshoot/index.mdx
+++ b/website/content/commands/troubleshoot/index.mdx
@@ -9,7 +9,7 @@ description: >-
Command: `consul troubleshoot`
-Use the `troubleshoot` command to diagnose Consul service mesh configuration or network issues. For additional information about using the `troubleshoot` command, including explanations, requirements, usage instructions, refer to the [service-to-service troubleshooting overview](/consul/docs/troubleshoot/troubleshoot-services).
+Use the `troubleshoot` command to diagnose Consul service mesh configuration or network issues. For additional information about using the `troubleshoot` command, including explanations, requirements, usage instructions, refer to the [service-to-service troubleshooting overview](/consul/docs/troubleshoot/service-communication).
## Usage
diff --git a/website/content/commands/troubleshoot/ports.mdx b/website/content/commands/troubleshoot/ports.mdx
index 5a4d5faf5082..0a0986d747d7 100644
--- a/website/content/commands/troubleshoot/ports.mdx
+++ b/website/content/commands/troubleshoot/ports.mdx
@@ -23,7 +23,7 @@ Usage: `consul troubleshoot ports [options]`
## Examples
The following example checks the default ports Consul server uses for TCP connectivity. Note that the `CONSUL_HTTP_ADDR` environment variable is set to `localhost`. As a result, the `-host` flag is not required.
-Refer to [Required Ports](/consul/docs/install/ports) for additional information.
+Refer to [Required Ports](/consul/docs/reference/architecture/ports) for additional information.
```shell-session
$ export CONSUL_HTTP_ADDR=localhost
diff --git a/website/content/commands/troubleshoot/proxy.mdx b/website/content/commands/troubleshoot/proxy.mdx
index fcd9552247dd..11cd6e1955d6 100644
--- a/website/content/commands/troubleshoot/proxy.mdx
+++ b/website/content/commands/troubleshoot/proxy.mdx
@@ -9,7 +9,7 @@ description: >-
Command: `consul troubleshoot proxy`
-The `troubleshoot proxy` command diagnoses Consul service mesh configuration and network issues to an upstream. For additional information about using the `troubleshoot proxy` command, including explanations, requirements, usage instructions, refer to the [service-to-service troubleshooting overview](/consul/docs/troubleshoot/troubleshoot-services).
+The `troubleshoot proxy` command diagnoses Consul service mesh configuration and network issues to an upstream. For additional information about using the `troubleshoot proxy` command, including explanations, requirements, usage instructions, refer to the [service-to-service troubleshooting overview](/consul/docs/troubleshoot/service-communication).
## Usage
diff --git a/website/content/commands/troubleshoot/upstreams.mdx b/website/content/commands/troubleshoot/upstreams.mdx
index f04a8beedc08..b5ca7ccbf88d 100644
--- a/website/content/commands/troubleshoot/upstreams.mdx
+++ b/website/content/commands/troubleshoot/upstreams.mdx
@@ -9,7 +9,7 @@ description: >-
Command: `consul troubleshoot upstreams`
-The `troubleshoot upstreams` lists the available upstreams in the Consul service mesh from the current service. For additional information about using the `troubleshoot upstreams` command, including explanations, requirements, usage instructions, refer to the [service-to-service troubleshooting overview](/consul/docs/troubleshoot/troubleshoot-services).
+The `troubleshoot upstreams` lists the available upstreams in the Consul service mesh from the current service. For additional information about using the `troubleshoot upstreams` command, including explanations, requirements, usage instructions, refer to the [service-to-service troubleshooting overview](/consul/docs/troubleshoot/service-communication).
## Usage
diff --git a/website/content/commands/validate.mdx b/website/content/commands/validate.mdx
index a435f29120a6..f92d7983e408 100644
--- a/website/content/commands/validate.mdx
+++ b/website/content/commands/validate.mdx
@@ -21,7 +21,7 @@ to be loaded by the agent. This command cannot operate on partial
configuration fragments since those won't pass the full agent validation.
For more information on the format of Consul's configuration files, read the
-consul agent [Configuration Files](/consul/docs/agent/config/config-files)
+consul agent [Configuration Files](/consul/docs/reference/agent/configuration-file)
section.
## Usage
diff --git a/website/content/commands/watch.mdx b/website/content/commands/watch.mdx
index 806864dae953..89d6fa4661a1 100644
--- a/website/content/commands/watch.mdx
+++ b/website/content/commands/watch.mdx
@@ -19,7 +19,7 @@ a process with the latest values of the view. If no process is specified,
the current values are dumped to STDOUT which can be a useful way to inspect
data in Consul.
-There is more [documentation on watches here](/consul/docs/dynamic-app-config/watches).
+There is more [documentation on watches here](/consul/docs/automate/watch).
## Usage
@@ -28,7 +28,7 @@ Usage: `consul watch [options] [child...]`
The only required option is `-type` which specifies the particular
data view. Depending on the type, various options may be required
or optionally provided. There is more documentation on watch
-[specifications here](/consul/docs/dynamic-app-config/watches).
+[specifications here](/consul/docs/automate/watch).
#### Command Options
@@ -60,6 +60,6 @@ or optionally provided. There is more documentation on watch
#### API Options
-@include 'http_api_options_client.mdx'
+@include 'legacy/http_api_options_client.mdx'
-@include 'http_api_options_server.mdx'
+@include 'legacy/http_api_options_server.mdx'
diff --git a/website/content/docs/agent/config-entries.mdx b/website/content/docs/agent/config-entries.mdx
deleted file mode 100644
index 9eb3f82f7f5f..000000000000
--- a/website/content/docs/agent/config-entries.mdx
+++ /dev/null
@@ -1,168 +0,0 @@
----
-layout: docs
-page_title: How to Use Configuration Entries
-description: >-
- Configuration entries define the behavior of Consul service mesh components. Learn how to use the `consul config` command to create, manage, and delete configuration entries.
----
-
-# How to Use Configuration Entries
-
-Configuration entries can be created to provide cluster-wide defaults for
-various aspects of Consul.
-
-Outside of Kubernetes, configuration entries can be specified in HCL or JSON using either
-`snake_case` or `CamelCase` for key names. On Kubernetes, configuration
-entries can be managed by custom resources in YAML.
-
-Outside of Kubernetes, every configuration entry specified in HCL or JSON has at least two fields:
-`Kind` and `Name`. Those two fields are used to uniquely identify a
-configuration entry. Configuration entries specified as HCL or JSON objects
-use either `snake_case` or `CamelCase` for key names.
-
-
-
-```hcl
-Kind = ""
-Name = ""
-```
-
-
-
-On Kubernetes, `Kind` is set as the custom resource `kind` and `Name` is set
-as `metadata.name`:
-
-
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind:
-metadata:
- name:
-```
-
-
-
-## Supported Config Entries
-
-See [Service Mesh - Config Entries](/consul/docs/connect/config-entries) for the list
-of supported config entries.
-
-## Managing Configuration Entries In Kubernetes
-
-See [Kubernetes Custom Resource Definitions](/consul/docs/k8s/crds).
-
-## Managing Configuration Entries Outside Of Kubernetes
-
-Configuration entries outside of Kubernetes should be managed with the Consul
-[CLI](/consul/commands/config) or [API](/consul/api-docs/config). Additionally, as a
-convenience for initial cluster bootstrapping, configuration entries can be
-specified in all of the Consul server
-[configuration files](/consul/docs/agent/config/config-files#config_entries_bootstrap)
-
-### Managing Configuration Entries with the CLI
-
-#### Creating or Updating a Configuration Entry
-
-The [`consul config write`](/consul/commands/config/write) command is used to
-create and update configuration entries. This command will load either a JSON or
-HCL file holding the configuration entry definition and then will push this
-configuration to Consul.
-
-Example HCL Configuration File:
-
-
-
-```hcl
-Kind = "proxy-defaults"
-Name = "global"
-Config {
- local_connect_timeout_ms = 1000
- handshake_timeout_ms = 10000
-}
-```
-
-
-
-Then to apply this configuration, run:
-
-```shell-session
-$ consul config write proxy-defaults.hcl
-```
-
-If you need to make changes to a configuration entry, simple edit that file and
-then rerun the command. This command will not output anything unless there is an
-error in applying the configuration entry. The `write` command also supports a
-`-cas` option to enable performing a compare-and-swap operation to prevent
-overwriting other unknown modifications.
-
-#### Reading a Configuration Entry
-
-The [`consul config read`](/consul/commands/config/read) command is used to
-read the current value of a configuration entry. The configuration entry will be
-displayed in JSON form which is how its transmitted between the CLI client and
-Consul's HTTP API.
-
-Example:
-
-```shell-session
-$ consul config read -kind service-defaults -name web
-{
- "Kind": "service-defaults",
- "Name": "web",
- "Protocol": "http"
-}
-```
-
-#### Listing Configuration Entries
-
-The [`consul config list`](/consul/commands/config/list) command is used to
-list out all the configuration entries for a given kind.
-
-Example:
-
-```shell-session
-$ consul config list -kind service-defaults
-web
-api
-db
-```
-
-#### Deleting Configuration Entries
-
-The [`consul config delete`](/consul/commands/config/delete) command is used
-to delete an entry by specifying both its `kind` and `name`.
-
-Example:
-
-```shell-session
-$ consul config delete -kind service-defaults -name web
-```
-
-This command will not output anything when the deletion is successful.
-
-#### Configuration Entry Management with Namespaces
-
-Configuration entry operations support passing a namespace in
-order to isolate the entry to affect only operations within that namespace. This was
-added in Consul 1.7.0.
-
-Example:
-
-```shell-session
-$ consul config write service-defaults.hcl -namespace foo
-```
-
-```shell-session
-$ consul config list -kind service-defaults -namespace foo
-web
-api
-```
-
-### Bootstrapping From A Configuration File
-
-Configuration entries can be bootstrapped by adding them [inline to each Consul
-server's configuration file](/consul/docs/agent/config/config-files#config_entries). When a
-server gains leadership, it will attempt to initialize the configuration entries.
-If a configuration entry does not already exist outside of the servers
-configuration, then it will create it. If a configuration entry does exist, that
-matches both `kind` and `name`, then the server will do nothing.
diff --git a/website/content/docs/agent/config/cli-flags.mdx b/website/content/docs/agent/config/cli-flags.mdx
deleted file mode 100644
index a38ddff8cbb5..000000000000
--- a/website/content/docs/agent/config/cli-flags.mdx
+++ /dev/null
@@ -1,565 +0,0 @@
----
-layout: docs
-page_title: Agents - CLI Reference
-description: >-
- Add flags to the `consul agent` command to configure agent properties and actions from the CLI. Learn about configurable options and how to format them with examples.
----
-
-# Agents Command-line Reference ((#commandline_options))
-
--> **Note:** Some CLI arguments may be different from HCL keys. See [Configuration Key Reference](/consul/docs/agent/config/config-files#config_key_reference) for equivalent HCL Keys.
-
-This topic describes the available command-line options for the Consul agent.
-
-## Usage
-
-See [Agent Overview](/consul/docs/agent#starting-the-consul-agent) for examples of how to use flags with the `consul agent` CLI.
-
-## Environment Variables
-
-Environment variables **cannot** be used to configure the Consul client. They
-_can_ be used when running other `consul` CLI commands that connect with a
-running agent, e.g. `CONSUL_HTTP_ADDR=192.168.0.1:8500 consul members`.
-
-See [Consul Commands](/consul/commands#environment-variables) for more
-information.
-
-## General
-
-- `-auto-reload-config` ((#\_auto_reload_config)) - This option directs Consul to automatically reload the [reloadable configuration options](/consul/docs/agent/config#reloadable-configuration) when configuration files change.
- Consul also watches the certificate and key files specified with the `cert_file` and `key_file` parameters and reloads the configuration if the files are updated.
-
-- `-check_output_max_size` - Override the default
- limit of 4k for maximum size of checks, this is a positive value. By limiting this
- size, it allows to put less pressure on Consul servers when many checks are having
- a very large output in their checks. In order to completely disable check output
- capture, it is possible to use [`discard_check_output`](/consul/docs/agent/config/config-files#discard_check_output).
-
-- `-client` ((#\_client)) - The address to which Consul will bind client
- interfaces, including the HTTP and DNS servers. By default, this is "127.0.0.1",
- allowing only loopback connections. In Consul 1.0 and later this can be set to
- a space-separated list of addresses to bind to, or a [go-sockaddr]
- template that can potentially resolve to multiple addresses.
-
-
-
- ```shell
- $ consul agent -dev -client '{{ GetPrivateInterfaces | exclude "type" "ipv6" | join "address" " " }}'
- ```
-
-
-
-
-
- ```shell
- $ consul agent -dev -client '{{ GetPrivateInterfaces | join "address" " " }} {{ GetAllInterfaces | include "flags" "loopback" | join "address" " " }}'
- ```
-
-
-
-
-
- ```shell
- $ consul agent -dev -client '{{ GetPrivateInterfaces | exclude "name" "br.*" | join "address" " " }}'
- ```
-
-
-
-- `-data-dir` ((#\_data_dir)) - This flag provides a data directory for
- the agent to store state. This is required for all agents. The directory should
- be durable across reboots. This is especially critical for agents that are running
- in server mode as they must be able to persist cluster state. Additionally, the
- directory must support the use of filesystem locking, meaning some types of mounted
- folders (e.g. VirtualBox shared folders) may not be suitable.
-
- **Note:** both server and non-server agents may store ACL tokens in the state in this directory so read access may grant access to any tokens on servers and to any tokens used during service registration on non-servers. On Unix-based platforms the files are written with 0600 permissions so you should ensure only trusted processes can execute as the same user as Consul. On Windows, you should ensure the directory has suitable permissions configured as these will be inherited.
-
-- `-datacenter` ((#\_datacenter)) - This flag controls the datacenter in
- which the agent is running. If not provided, it defaults to "dc1". Consul has first-class
- support for multiple datacenters, but it relies on proper configuration. Nodes
- in the same datacenter should be on a single LAN.
-
-- `-dev` ((#\_dev)) - Enable development server mode. This is useful for
- quickly starting a Consul agent with all persistence options turned off, enabling
- an in-memory server which can be used for rapid prototyping or developing against
- the API. In this mode, [service mesh is enabled](/consul/docs/connect/configuration) and
- will by default create a new root CA certificate on startup. This mode is **not**
- intended for production use as it does not write any data to disk. The gRPC port
- is also defaulted to `8502` in this mode.
-
-- `-disable-keyring-file` ((#\_disable_keyring_file)) - If set, the keyring
- will not be persisted to a file. Any installed keys will be lost on shutdown, and
- only the given `-encrypt` key will be available on startup. This defaults to false.
-
-- `-enable-script-checks` ((#\_enable_script_checks)) This controls whether
- [health checks that execute scripts](/consul/docs/services/usage/checks) are enabled on this
- agent, and defaults to `false` so operators must opt-in to allowing these. This
- was added in Consul 0.9.0.
-
- ~> **Security Warning:** Enabling script checks in some configurations may
- introduce a remote execution vulnerability which is known to be targeted by
- malware. We strongly recommend `-enable-local-script-checks` instead. See [this
- blog post](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations)
- for more details.
-
-- `-enable-local-script-checks` ((#\_enable_local_script_checks))
- Like [`-enable-script-checks`](#_enable_script_checks), but only enable them when
- they are defined in the local configuration files. Script checks defined in HTTP
- API registrations will still not be allowed.
-
-- `-encrypt` ((#\_encrypt)) - Specifies the secret key to use for encryption
- of Consul network traffic. This key must be 32-bytes that are Base64-encoded. The
- easiest way to create an encryption key is to use [`consul keygen`](/consul/commands/keygen).
- All nodes within a cluster must share the same encryption key to communicate. The
- provided key is automatically persisted to the data directory and loaded automatically
- whenever the agent is restarted. This means that to encrypt Consul's gossip protocol,
- this option only needs to be provided once on each agent's initial startup sequence.
- If it is provided after Consul has been initialized with an encryption key, then
- the provided key is ignored and a warning will be displayed.
-
-- `-grpc-port` ((#\_grpc_port)) - the gRPC API port to listen on. Default
- -1 (gRPC disabled). See [ports](/consul/docs/agent/config#ports-used) documentation for more detail.
-
-- `-hcl` ((#\_hcl)) - A HCL configuration fragment. This HCL configuration
- fragment is appended to the configuration and allows to specify the full range
- of options of a config file on the command line. This option can be specified multiple
- times. This was added in Consul 1.0.
-
-- `-http-port` ((#\_http_port)) - the HTTP API port to listen on. This overrides
- the default port 8500. This option is very useful when deploying Consul to an environment
- which communicates the HTTP port through the environment e.g. PaaS like CloudFoundry,
- allowing you to set the port directly via a Procfile.
-
-- `-https-port` ((#\_https_port)) - the HTTPS API port to listen on. Default
- -1 (https disabled). See [ports](/consul/docs/agent/config#ports-used) documentation for more detail.
-
-- `-default-query-time` ((#\_default_query_time)) - This flag controls the
- amount of time a blocking query will wait before Consul will force a response.
- This value can be overridden by the `wait` query parameter. Note that Consul applies
- some jitter on top of this time. Defaults to 300s.
-
-- `-max-query-time` ((#\_max_query_time)) - this flag controls the maximum
- amount of time a blocking query can wait before Consul will force a response. Consul
- applies jitter to the wait time. The jittered time will be capped to this time.
- Defaults to 600s.
-
-- `-pid-file` ((#\_pid_file)) - This flag provides the file path for the
- agent to store its PID. This is useful for sending signals (for example, `SIGINT`
- to close the agent or `SIGHUP` to update check definitions) to the agent.
-
-- `-protocol` ((#\_protocol)) - The Consul protocol version to use. Consul
- agents speak protocol 2 by default, however agents will automatically use protocol > 2 when speaking to compatible agents. This should be set only when [upgrading](/consul/docs/upgrading). You can view the protocol versions supported by Consul by running `consul version`.
-
-- `-raft-protocol` ((#\_raft_protocol)) - This controls the internal version
- of the Raft consensus protocol used for server communications. This must be set
- to 3 in order to gain access to Autopilot features, with the exception of [`cleanup_dead_servers`](/consul/docs/agent/config/config-files#cleanup_dead_servers). Defaults to 3 in Consul 1.0.0 and later (defaulted to 2 previously). See [Raft Protocol Version Compatibility](/consul/docs/upgrading/upgrade-specific#raft-protocol-version-compatibility) for more details.
-
-- `-segment` ((#\_segment)) - This flag is used to set
- the name of the network segment the agent belongs to. An agent can only join and
- communicate with other agents within its network segment. Ensure the [join
- operation uses the correct port for this segment](/consul/docs/enterprise/network-segments/create-network-segment#configure-clients-to-join-segments).
- Review the [Network Segments documentation](/consul/docs/enterprise/network-segments/create-network-segment)
- for more details. By default, this is an empty string, which is the ``
- network segment.
-
- ~> **Warning:** The `segment` flag cannot be used with the [`partition`](/consul/docs/agent/config/config-files#partition-1) option.
-
-## Advertise Address Options
-
-- `-advertise` ((#\_advertise)) - The advertise address is used to change
- the address that we advertise to other nodes in the cluster. By default, the [`-bind`](#_bind)
- address is advertised. However, in some cases, there may be a routable address
- that cannot be bound. This flag enables gossiping a different address to support
- this. If this address is not routable, the node will be in a constant flapping
- state as other nodes will treat the non-routability as a failure. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr]
- template that is resolved at runtime.
-
-
-
- ```shell-session
- $ consul agent -advertise '{{ GetInterfaceIP "eth0" }}'
- ```
-
-
-
-- `-advertise-wan` ((#\_advertise-wan)) - The advertise WAN address is used
- to change the address that we advertise to server nodes joining through the WAN.
- This can also be set on client agents when used in combination with the [`translate_wan_addrs`](/consul/docs/agent/config/config-files#translate_wan_addrs) configuration option. By default, the [`-advertise`](#_advertise) address
- is advertised. However, in some cases all members of all datacenters cannot be
- on the same physical or virtual network, especially on hybrid setups mixing cloud
- and private datacenters. This flag enables server nodes gossiping through the public
- network for the WAN while using private VLANs for gossiping to each other and their
- client agents, and it allows client agents to be reached at this address when being
- accessed from a remote datacenter if the remote datacenter is configured with [`translate_wan_addrs`](/consul/docs/agent/config/config-files#translate_wan_addrs). In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr]
- template that is resolved at runtime.
-
-## Address Bind Options
-
-- `-bind` ((#\_bind)) - The address that should be bound to for internal
- cluster communications. This is an IP address that should be reachable by all other
- nodes in the cluster. By default, this is "0.0.0.0", meaning Consul will bind to
- all addresses on the local machine and will [advertise](#_advertise)
- the private IPv4 address to the rest of the cluster. If there are multiple private
- IPv4 addresses available, Consul will exit with an error at startup. If you specify
- `"[::]"`, Consul will [advertise](#_advertise) the public
- IPv6 address. If there are multiple public IPv6 addresses available, Consul will
- exit with an error at startup. Consul uses both TCP and UDP and the same port for
- both. If you have any firewalls, be sure to allow both protocols. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr]
- template that must resolve at runtime to a single address. Some example templates:
-
-
-
- ```shell-session
- $ consul agent -bind '{{ GetPrivateInterfaces | include "network" "10.0.0.0/8" | attr "address" }}'
- ```
-
-
-
-
-
- ```shell-session
- $ consul agent -bind '{{ GetInterfaceIP "eth0" }}'
- ```
-
-
-
-
-
- ```shell-session
- $ consul agent -bind '{{ GetAllInterfaces | include "name" "^eth" | include "flags" "forwardable|up" | attr "address" }}'
- ```
-
-
-
-- `-serf-wan-bind` ((#\_serf_wan_bind)) - The address that should be bound
- to for Serf WAN gossip communications. By default, the value follows the same rules
- as [`-bind` command-line flag](#_bind), and if this is not specified, the `-bind`
- option is used. This is available in Consul 0.7.1 and later. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr]
- template that is resolved at runtime.
-
-- `-serf-lan-bind` ((#\_serf_lan_bind)) - The address that should be bound
- to for Serf LAN gossip communications. This is an IP address that should be reachable
- by all other LAN nodes in the cluster. By default, the value follows the same rules
- as [`-bind` command-line flag](#_bind), and if this is not specified, the `-bind`
- option is used. This is available in Consul 0.7.1 and later. In Consul 1.1.0 and later this can be dynamically defined with a [go-sockaddr]
- template that is resolved at runtime.
-
-## Bootstrap Options
-
-- `-bootstrap` ((#\_bootstrap)) - This flag is used to control if a server
- is in "bootstrap" mode. It is important that no more than one server **per** datacenter
- be running in this mode. Technically, a server in bootstrap mode is allowed to
- self-elect as the Raft leader. It is important that only a single node is in this
- mode; otherwise, consistency cannot be guaranteed as multiple nodes are able to
- self-elect. It is not recommended to use this flag after a cluster has been bootstrapped.
-
-- `-bootstrap-expect` ((#\_bootstrap_expect)) - This flag provides the number
- of expected servers in the datacenter. Either this value should not be provided
- or the value must agree with other servers in the cluster. When provided, Consul
- waits until the specified number of servers are available and then bootstraps the
- cluster. This allows an initial leader to be elected automatically. This cannot
- be used in conjunction with the legacy [`-bootstrap`](#_bootstrap) flag. This flag
- requires [`-server`](#_server) mode.
-
-## Configuration File Options
-
-- `-config-file` ((#\_config_file)) - A configuration file to load. For
- more information on the format of this file, read the [Configuration Files](/consul/docs/agent/config/config-files)
- section. This option can be specified multiple times to load multiple configuration
- files. If it is specified multiple times, configuration files loaded later will
- merge with configuration files loaded earlier. During a config merge, single-value
- keys (string, int, bool) will simply have their values replaced while list types
- will be appended together.
-
-- `-config-dir` ((#\_config_dir)) - A directory of configuration files to
- load. Consul will load all files in this directory with the suffix ".json" or ".hcl".
- The load order is alphabetical, and the same merge routine is used as with
- the [`config-file`](#_config_file) option above. This option can be specified multiple
- times to load multiple directories. Sub-directories of the config directory are
- not loaded. For more information on the format of the configuration files, see
- the [Configuration Files](/consul/docs/agent/config/config-files) section.
-
-- `-config-format` ((#\_config_format)) - The format of the configuration
- files to load. Normally, Consul detects the format of the config files from the
- ".json" or ".hcl" extension. Setting this option to either "json" or "hcl" forces
- Consul to interpret any file with or without extension to be interpreted in that
- format.
-
-## DNS and Domain Options
-
-- `-dns-port` ((#\_dns_port)) - the DNS port to listen on. This overrides
- the default port 8600. This is available in Consul 0.7 and later.
-
-- `-domain` ((#\_domain)) - By default, Consul responds to DNS queries in
- the "consul." domain. This flag can be used to change that domain. All queries
- in this domain are assumed to be handled by Consul and will not be recursively
- resolved.
-
-- `-alt-domain` ((#\_alt_domain)) - This flag allows Consul to respond to
- DNS queries in an alternate domain, in addition to the primary domain. If unset,
- no alternate domain is used.
-
- In Consul 1.10.4 and later, Consul DNS responses will use the same domain as in the query (`-domain` or `-alt-domain`) where applicable.
- PTR query responses will always use `-domain`, since the desired domain cannot be included in the query.
-
-- `-recursor` ((#\_recursor)) - Specifies the address of an upstream DNS
- server. This option may be provided multiple times, and is functionally equivalent
- to the [`recursors` configuration option](/consul/docs/agent/config/config-files#recursors).
-
-- `-join` ((#\_join)) - **Deprecated in Consul 1.15. This flag will be removed in a future version of Consul. Use the `-retry-join` flag instead.**
- This is an alias of [`-retry-join`](#_retry_join).
-
-- `-retry-join` ((#\_retry_join)) - Address of another agent to join upon starting up. Joining is
- retried until success. Once the agent joins successfully as a member, it will not attempt to join
- again. After joining, the agent solely maintains its membership via gossip. This option can be
- specified multiple times to specify multiple agents to join. By default, the agent won't join any
- nodes when it starts up. The value can contain IPv4, IPv6, or DNS addresses. Literal IPv6
- addresses must be enclosed in square brackets. If multiple values are given, they are tried and
- retried in the order listed until the first succeeds.
-
- This supports [Cloud Auto-Joining](#cloud-auto-joining).
-
- This can be dynamically defined with a [go-sockaddr] template that is resolved at runtime.
-
- If Consul is running on a non-default Serf LAN port, you must specify the port number in the address when using the `-retry-join` flag. Alternatively, you can specify the custom port number as the default in the agent's [`ports.serf_lan`](/consul/docs/agent/config/config-files#serf_lan_port) configuration or with the [`-serf-lan-port`](#_serf_lan_port) command line flag when starting the agent.
-
- If your network contains network segments, refer to the [network segments documentation](/consul/docs/enterprise/network-segments/create-network-segment) for additional information.
-
- Here are some examples of using `-retry-join`:
-
-
-
- ```shell-session
- $ consul agent -retry-join "consul.domain.internal"
- ```
-
-
-
-
-
- ```shell-session
- $ consul agent -retry-join "10.0.4.67"
- ```
-
-
-
-
-
- ```shell-session
- $ consul agent -retry-join "192.0.2.10:8304"
- ```
-
-
-
-
-
- ```shell-session
- $ consul agent -retry-join "[::1]:8301"
- ```
-
-
-
-
-
- ```shell-session
- $ consul agent -retry-join "consul.domain.internal" -retry-join "10.0.4.67"
- ```
-
-
-
- ### Cloud Auto-Joining
-
- The `-retry-join` option accepts a unified interface using the
- [go-discover](https://github.com/hashicorp/go-discover) library for doing
- automatic cluster joining using cloud metadata. For more information, see
- the [Cloud Auto-join page](/consul/docs/install/cloud-auto-join).
-
-
-
- ```shell-session
- $ consul agent -retry-join "provider=aws tag_key=..."
- ```
-
-
-
-- `-retry-interval` ((#\_retry_interval)) - Time to wait between join attempts.
- Defaults to 30s.
-
-- `-retry-max` ((#\_retry_max)) - The maximum number of join attempts if using
- [`-retry-join`](#_retry_join) before exiting with return code 1. By default, this is set
- to 0 which is interpreted as infinite retries.
-
-- `-join-wan` ((#\_join_wan)) - **Deprecated in Consul 1.15. This flag will be removed in a future version of Consul. Use the `-retry-join-wan` flag instead.**
- This is an alias of [`-retry-join-wan`](#_retry_join_wan)
-
-- `-retry-join-wan` ((#\_retry_join_wan)) - Address of another WAN agent to join upon starting up.
- WAN joining is retried until success. This can be specified multiple times to specify multiple WAN
- agents to join. If multiple values are given, they are tried and retried in the order listed
- until the first succeeds. By default, the agent won't WAN join any nodes when it starts up.
-
- This supports [Cloud Auto-Joining](#cloud-auto-joining).
-
- This can be dynamically defined with a [go-sockaddr] template that is resolved at runtime.
-
-- `-primary-gateway` ((#\_primary_gateway)) - Similar to [`-retry-join-wan`](#_retry_join_wan)
- but allows retrying discovery of fallback addresses for the mesh gateways in the
- primary datacenter if the first attempt fails. This is useful for cases where we
- know the address will become available eventually. [Cloud Auto-Joining](#cloud-auto-joining)
- is supported as well as [go-sockaddr]
- templates. This was added in Consul 1.8.0.
-
-- `-retry-interval-wan` ((#\_retry_interval_wan)) - Time to wait between
- [`-retry-join-wan`](#_retry_join_wan) attempts. Defaults to 30s.
-
-- `-retry-max-wan` ((#\_retry_max_wan)) - The maximum number of [`-retry-join-wan`](#_join_wan)
- attempts to be made before exiting with return code 1. By default, this is set
- to 0 which is interpreted as infinite retries.
-
-- `-rejoin` ((#\_rejoin)) - When provided, Consul will ignore a previous
- leave and attempt to rejoin the cluster when starting. By default, Consul treats
- leave as a permanent intent and does not attempt to join the cluster again when
- starting. This flag allows the previous state to be used to rejoin the cluster.
-
-## Log Options
-
-- `-log-file` ((#\_log_file)) - writes all the Consul agent log messages
- to a file at the path indicated by this flag. The filename defaults to `consul.log`.
- When the log file rotates, this value is used as a prefix for the path to the log and the current timestamp is
- appended to the file name. If the value ends in a path separator, `consul-`
- will be appended to the value. If the file name is missing an extension, `.log`
- is appended. For example, setting `log-file` to `/var/log/` would result in a log
- file path of `/var/log/consul.log`. `log-file` can be combined with
- [`-log-rotate-bytes`](#_log_rotate_bytes) and [`-log-rotate-duration`](#_log_rotate_duration)
- for a fine-grained log rotation experience. After rotation, the path and filename take the following form:
- `/var/log/consul-{timestamp}.log`
-
-- `-log-rotate-bytes` ((#\_log_rotate_bytes)) - to specify the number of
- bytes that should be written to a log before it needs to be rotated. Unless specified,
- there is no limit to the number of bytes that can be written to a log file.
-
-- `-log-rotate-duration` ((#\_log_rotate_duration)) - to specify the maximum
- duration a log should be written to before it needs to be rotated. Must be a duration
- value such as 30s. Defaults to 24h.
-
-- `-log-rotate-max-files` ((#\_log_rotate_max_files)) - to specify the maximum
- number of older log file archives to keep. Defaults to 0 (no files are ever deleted).
- Set to -1 to discard old log files when a new one is created.
-
-- `-log-level` ((#\_log_level)) - The level of logging to show after the
- Consul agent has started. This defaults to "info". The available log levels are
- "trace", "debug", "info", "warn", and "error". You can always connect to an agent
- via [`consul monitor`](/consul/commands/monitor) and use any log level. Also,
- the log level can be changed during a config reload.
-
-- `-log-json` ((#\_log_json)) - This flag enables the agent to output logs
- in a JSON format. By default this is false.
-
-- `-syslog` ((#\_syslog)) - This flag enables logging to syslog. This is
- only supported on Linux and macOS. It will result in an error if provided on Windows.
-
-## Node Options
-
-- `-node` ((#\_node)) - The name of this node in the cluster. This must
- be unique within the cluster. By default this is the hostname of the machine.
- The node name cannot contain whitespace or quotation marks. To query the node from DNS, the name must only contain alphanumeric characters and hyphens (`-`).
-
-- `-node-id` ((#\_node_id)) - Available in Consul 0.7.3 and later, this
- is a unique identifier for this node across all time, even if the name of the node
- or address changes. This must be in the form of a hex string, 36 characters long,
- such as `adf4238a-882b-9ddc-4a9d-5b6758e4159e`. If this isn't supplied, which is
- the most common case, then the agent will generate an identifier at startup and
- persist it in the [data directory](#_data_dir) so that it will remain the same
- across agent restarts. Information from the host will be used to generate a deterministic
- node ID if possible, unless [`-disable-host-node-id`](#_disable_host_node_id) is
- set to true.
-
-- `-node-meta` ((#\_node_meta)) - Available in Consul 0.7.3 and later, this
- specifies an arbitrary metadata key/value pair to associate with the node, of the
- form `key:value`. This can be specified multiple times. Node metadata pairs have
- the following restrictions:
-
- - A maximum of 64 key/value pairs can be registered per node.
- - Metadata keys must be between 1 and 128 characters (inclusive) in length
- - Metadata keys must contain only alphanumeric, `-`, and `_` characters.
- - Metadata keys must not begin with the `consul-` prefix; that is reserved for internal use by Consul.
- - Metadata values must be between 0 and 512 (inclusive) characters in length.
- - Metadata values for keys beginning with `rfc1035-` are encoded verbatim in DNS TXT requests, otherwise
- the metadata kv-pair is encoded according [RFC1464](https://www.ietf.org/rfc/rfc1464.txt).
-
-- `-disable-host-node-id` ((#\_disable_host_node_id)) - Setting this to
- true will prevent Consul from using information from the host to generate a deterministic
- node ID, and will instead generate a random node ID which will be persisted in
- the data directory. This is useful when running multiple Consul agents on the same
- host for testing. This defaults to false in Consul prior to version 0.8.5 and in
- 0.8.5 and later defaults to true, so you must opt-in for host-based IDs. Host-based
- IDs are generated using [gopsutil](https://github.com/shirou/gopsutil/), which
- is shared with HashiCorp's [Nomad](https://www.nomadproject.io/), so if you opt-in
- to host-based IDs then Consul and Nomad will use information on the host to automatically
- assign the same ID in both systems.
-
-## Serf Options
-
-- `-serf-lan-allowed-cidrs` ((#\_serf_lan_allowed_cidrs)) - The Serf LAN allowed CIDRs allow accepting incoming
-  connections for Serf only from several networks (multiple values are supported).
- Those networks are specified with CIDR notation (eg: 192.168.1.0/24).
- This is available in Consul 1.8 and later.
-
-- `-serf-lan-port` ((#\_serf_lan_port)) - the Serf LAN port to listen on.
- This overrides the default Serf LAN port 8301. This is available in Consul 1.2.2
- and later.
-
-- `-serf-wan-allowed-cidrs` ((#\_serf_wan_allowed_cidrs)) - The Serf WAN allowed CIDRs allow accepting incoming
-  connections for Serf only from several networks (multiple values are supported).
- Those networks are specified with CIDR notation (eg: 192.168.1.0/24).
- This is available in Consul 1.8 and later.
-
-- `-serf-wan-port` ((#\_serf_wan_port)) - the Serf WAN port to listen on.
- This overrides the default Serf WAN port 8302. This is available in Consul 1.2.2
- and later.
-
-## Server Options
-
-- `-server` ((#\_server)) - This flag is used to control if an agent is
- in server or client mode. When provided, an agent will act as a Consul server.
- Each Consul cluster must have at least one server and ideally no more than 5 per
- datacenter. All servers participate in the Raft consensus algorithm to ensure that
- transactions occur in a consistent, linearizable manner. Transactions modify cluster
- state, which is maintained on all server nodes to ensure availability in the case
- of node failure. Server nodes also participate in a WAN gossip pool with server
- nodes in other datacenters. Servers act as gateways to other datacenters and forward
- RPC traffic as appropriate.
-
-- `-server-port` ((#\_server_port)) - the server RPC port to listen on.
- This overrides the default server RPC port 8300. This is available in Consul 1.2.2
- and later.
-
-- `-non-voting-server` ((#\_non_voting_server)) - **This field
- is deprecated in Consul 1.9.1. See the [`-read-replica`](#_read_replica) flag instead.**
-
-- `-read-replica` ((#\_read_replica)) - This
- flag is used to make the server not participate in the Raft quorum, and have it
- only receive the data replication stream. This can be used to add read scalability
- to a cluster in cases where a high volume of reads to servers are needed.
-
-## UI Options
-
-- `-ui` ((#\_ui)) - Enables the built-in web UI server and the required
- HTTP routes. This eliminates the need to maintain the Consul web UI files separately
- from the binary.
-
-- `-ui-dir` ((#\_ui_dir)) - This flag provides the directory containing
- the Web UI resources for Consul. This will automatically enable the Web UI. The
- directory must be readable to the agent. Starting with Consul version 0.7.0 and
- later, the Web UI assets are included in the binary so this flag is no longer necessary;
- specifying only the `-ui` flag is enough to enable the Web UI. Specifying both
- the '-ui' and '-ui-dir' flags will result in an error.
-
-- `-ui-content-path` ((#\_ui\_content\_path)) - This flag provides the option
- to change the path the Consul UI loads from and will be displayed in the browser.
- By default, the path is `/ui/`, for example `http://localhost:8500/ui/`. Only alphanumerics,
- `-`, and `_` are allowed in a custom path. `/v1/` is not allowed as it would overwrite
- the API endpoint.
-
-{/* list of reference-style links */}
-
-[go-sockaddr]: https://godoc.org/github.com/hashicorp/go-sockaddr/template
diff --git a/website/content/docs/agent/config/config-files.mdx b/website/content/docs/agent/config/config-files.mdx
deleted file mode 100644
index 63e8137c4f06..000000000000
--- a/website/content/docs/agent/config/config-files.mdx
+++ /dev/null
@@ -1,2369 +0,0 @@
----
-layout: docs
-page_title: Agents - Configuration File Reference
-description: >-
- Use agent configuration files to assign attributes to agents and configure multiple agents at once. Learn about agent configuration file parameters and formatting with this reference page and sample code.
----
-
-# Agents Configuration File Reference ((#configuration_files))
-
-This topic describes the parameters for configuring Consul agents. For information about how to start Consul agents, refer to [Starting the Consul Agent](/consul/docs/agent#starting-the-consul-agent).
-
-## Overview
-
-You can create one or more files to configure the Consul agent on startup. We recommend
-grouping similar configurations into separate files, such as ACL parameters, to make it
-easier to manage configuration changes. Using external files may be easier than
-configuring agents on the command-line when Consul is
-being configured using a configuration management system.
-
-The configuration files are formatted as HCL or JSON. JSON formatted configs are
-easily readable and editable by both humans and computers. JSON formatted
-configuration consists of a single JSON object with multiple configuration keys
-specified within it.
-
-
-
-```hcl
-datacenter = "east-aws"
-data_dir = "/opt/consul"
-log_level = "INFO"
-node_name = "foobar"
-server = true
-watches = [
- {
- type = "checks"
- handler = "/usr/bin/health-check-handler.sh"
- }
-]
-
-telemetry {
- statsite_address = "127.0.0.1:2180"
-}
-```
-
-```json
-{
- "datacenter": "east-aws",
- "data_dir": "/opt/consul",
- "log_level": "INFO",
- "node_name": "foobar",
- "server": true,
- "watches": [
- {
- "type": "checks",
- "handler": "/usr/bin/health-check-handler.sh"
- }
- ],
- "telemetry": {
- "statsite_address": "127.0.0.1:2180"
- }
-}
-```
-
-
-
-### Time-to-live values
-
-Consul uses the Go `time` package to parse all time-to-live (TTL) values used in Consul agent configuration files. Specify integer and float values as a string and include one or more of the following units of time:
-
-- `ns`
-- `us`
-- `µs`
-- `ms`
-- `s`
-- `m`
-- `h`
-
-Examples:
-
-- `'300ms'`
-- `'1.5h'`
-- `'2h45m'`
-
-Refer to the [formatting specification](https://golang.org/pkg/time/#ParseDuration) for additional information.
-
-## General parameters
-
-- `addresses` - This is a nested object that allows setting
- bind addresses. In Consul 1.0 and later these can be set to a space-separated list
- of addresses to bind to, or a [go-sockaddr] template that can potentially resolve to multiple addresses.
-
- `http`, `https` and `grpc` all support binding to a Unix domain socket. A
- socket can be specified in the form `unix:///path/to/socket`. A new domain
- socket will be created at the given path. If the specified file path already
- exists, Consul will attempt to clear the file and create the domain socket
- in its place. The permissions of the socket file are tunable via the
- [`unix_sockets` config construct](#unix_sockets).
-
- When running Consul agent commands against Unix socket interfaces, use the
- `-http-addr` argument to specify the path to the socket. You can also place
- the desired values in the `CONSUL_HTTP_ADDR` environment variable.
-
- For TCP addresses, the environment variable value should be an IP address
- _with the port_. For example: `10.0.0.1:8500` and not `10.0.0.1`. However,
- ports are set separately in the [`ports`](#ports) structure when
- defining them in a configuration file.
-
- The following keys are valid:
-
- - `dns` - The DNS server. Defaults to `client_addr`
- - `http` - The HTTP API. Defaults to `client_addr`
- - `https` - The HTTPS API. Defaults to `client_addr`
- - `grpc` - The gRPC API. Defaults to `client_addr`
- - `grpc_tls` - The gRPC API with TLS. Defaults to `client_addr`
-
-- `alt_domain` Equivalent to the [`-alt-domain` command-line flag](/consul/docs/agent/config/cli-flags#_alt_domain)
-
-- `audit` - Added in Consul 1.8, the audit object allows users to enable auditing
- and configure a sink and filters for their audit logs. For more information, review the [audit log tutorial](/consul/tutorials/datacenter-operations/audit-logging).
-
-
-
- ```hcl
- audit {
- enabled = true
- sink "My sink" {
- type = "file"
- format = "json"
- path = "data/audit/audit.json"
- delivery_guarantee = "best-effort"
- rotate_duration = "24h"
- rotate_max_files = 15
- rotate_bytes = 25165824
- }
- }
- ```
-
- ```json
- {
- "audit": {
- "enabled": true,
- "sink": {
- "My sink": {
- "type": "file",
- "format": "json",
- "path": "data/audit/audit.json",
- "delivery_guarantee": "best-effort",
- "rotate_duration": "24h",
- "rotate_max_files": 15,
- "rotate_bytes": 25165824
- }
- }
- }
- }
- ```
-
-
-
- The following sub-keys are available:
-
-  - `enabled` - Controls whether Consul records an audit log entry each time a user
- performs an operation. ACLs must be enabled to use this feature. Defaults to `false`.
-
- - `sink` - This object provides configuration for the destination to which
- Consul will log auditing events. Sink is an object containing keys to sink objects, where the key is the name of the sink.
-
- - `type` - Type specifies what kind of sink this is.
- The following keys are valid:
- - `file` - Currently only file sinks are available, they take the following keys.
- - `format` - Format specifies what format the events will
- be emitted with.
- The following keys are valid:
- - `json` - Currently only json events are offered.
- - `path` - The directory and filename to write audit events to.
- - `delivery_guarantee` - Specifies
- the rules governing how audit events are written.
- The following keys are valid:
- - `best-effort` - Consul only supports `best-effort` event delivery.
- - `mode` - The permissions to set on the audit log files.
- - `rotate_duration` - Specifies the
- interval by which the system rotates to a new log file. At least one of `rotate_duration` or `rotate_bytes`
- must be configured to enable audit logging.
- - `rotate_max_files` - Defines the
- limit that Consul should follow before it deletes old log files.
- - `rotate_bytes` - Specifies how large an
- individual log file can grow before Consul rotates to a new file. At least one of `rotate_bytes` or
- `rotate_duration` must be configured to enable audit logging.
-
-- `autopilot` Added in Consul 0.8, this object allows a
- number of sub-keys to be set which can configure operator-friendly settings for
- Consul servers. When these keys are provided as configuration, they will only be
- respected on bootstrapping. If they are not provided, the defaults will be used.
- In order to change the value of these options after bootstrapping, you will need
- to use the [Consul Operator Autopilot](/consul/commands/operator/autopilot)
- command. For more information about Autopilot, review the [Autopilot tutorial](/consul/tutorials/datacenter-operations/autopilot-datacenter-operations).
-
- The following sub-keys are available:
-
- - `cleanup_dead_servers` - This controls the
- automatic removal of dead server nodes periodically and whenever a new server
- is added to the cluster. Defaults to `true`.
-
- - `last_contact_threshold` - Controls the
- maximum amount of time a server can go without contact from the leader before
- being considered unhealthy. Must be a duration value such as `10s`. Defaults
- to `200ms`.
-
- - `max_trailing_logs` - Controls the maximum number
- of log entries that a server can trail the leader by before being considered
- unhealthy. Defaults to 250.
-
- - `min_quorum` - Sets the minimum number of servers necessary
- in a cluster. Autopilot will stop pruning dead servers when this minimum is reached. There is no default.
-
- - `server_stabilization_time` - Controls
- the minimum amount of time a server must be stable in the 'healthy' state before
- being added to the cluster. Only takes effect if all servers are running Raft
- protocol version 3 or higher. Must be a duration value such as `30s`. Defaults
- to `10s`.
-
- - `redundancy_zone_tag` -
- This controls the [`node_meta`](#node_meta) key to use when Autopilot is separating
- servers into zones for redundancy. Only one server in each zone can be a voting
- member at one time. If left blank (the default), this feature will be disabled.
-
- - `disable_upgrade_migration` -
- If set to `true`, this setting will disable Autopilot's upgrade migration strategy
- in Consul Enterprise of waiting until enough newer-versioned servers have been
- added to the cluster before promoting any of them to voters. Defaults to `false`.
-
- - `upgrade_version_tag` -
- The node_meta tag to use for version info when performing upgrade migrations.
- If this is not set, the Consul version will be used.
-
-- `auto_config` This object allows setting options for the `auto_config` feature.
-
- The following sub-keys are available:
-
- - `enabled` (Defaults to `false`) This option enables `auto_config` on a client
- agent. When starting up but before joining the cluster, the client agent will
- make an RPC to the configured server addresses to request configuration settings,
- such as its `agent` ACL token, TLS certificates, Gossip encryption key as well
- as other configuration settings. These configurations get merged in as defaults
- with any user-supplied configuration on the client agent able to override them.
- The initial RPC uses a JWT specified with either `intro_token`,
- `intro_token_file` or the `CONSUL_INTRO_TOKEN` environment variable to authorize
- the request. How the JWT token is verified is controlled by the `auto_config.authorizer`
- object available for use on Consul servers. Enabling this option also enables
- service mesh because it is vital for `auto_config`, more specifically the service mesh CA
- and certificates infrastructure.
-
- ~> **Warning:** Enabling `auto_config` conflicts with the [`auto_encrypt.tls`](#tls) feature.
- Only one option may be specified.
-
- - `intro_token` (Defaults to `""`) This specifies the JWT to use for the initial
- `auto_config` RPC to the Consul servers. This can be overridden with the
- `CONSUL_INTRO_TOKEN` environment variable
-
- - `intro_token_file` (Defaults to `""`) This specifies a file containing the JWT
- to use for the initial `auto_config` RPC to the Consul servers. This token
- from this file is only loaded if the `intro_token` configuration is unset as
- well as the `CONSUL_INTRO_TOKEN` environment variable
-
- - `server_addresses` (Defaults to `[]`) This specifies the addresses of servers in
- the local datacenter to use for the initial RPC. These addresses support
- [Cloud Auto-Joining](/consul/docs/agent/config/cli-flags#cloud-auto-joining) and can optionally include a port to
- use when making the outbound connection. If no port is provided, the `server_port`
- will be used.
-
- - `dns_sans` (Defaults to `[]`) This is a list of extra DNS SANs to request in the
- client agent's TLS certificate. The `localhost` DNS SAN is always requested.
-
- - `ip_sans` (Defaults to `[]`) This is a list of extra IP SANs to request in the
- client agent's TLS certificate. The `::1` and `127.0.0.1` IP SANs are always requested.
-
- - `authorization` This object controls how a Consul server will authorize `auto_config`
- requests and in particular how to verify the JWT intro token.
-
- - `enabled` (Defaults to `false`) This option enables `auto_config` authorization
- capabilities on the server.
-
- - `static` This object controls configuring the static authorizer setup in the Consul
- configuration file. Almost all sub-keys are identical to those provided by the [JWT
- Auth Method](/consul/docs/security/acl/auth-methods/jwt).
-
- - `jwt_validation_pub_keys` (Defaults to `[]`) A list of PEM-encoded public keys
- to use to authenticate signatures locally.
-
-        Exactly one of `jwks_url`, `jwt_validation_pub_keys`, or `oidc_discovery_url` is required.
-
- - `oidc_discovery_url` (Defaults to `""`) The OIDC Discovery URL, without any
- .well-known component (base path).
-
-        Exactly one of `jwks_url`, `jwt_validation_pub_keys`, or `oidc_discovery_url` is required.
-
- - `oidc_discovery_ca_cert` (Defaults to `""`) PEM encoded CA cert for use by the TLS
- client used to talk with the OIDC Discovery URL. NOTE: Every line must end
- with a newline (`\n`). If not set, system certificates are used.
-
- - `jwks_url` (Defaults to `""`) The JWKS URL to use to authenticate signatures.
-
-        Exactly one of `jwks_url`, `jwt_validation_pub_keys`, or `oidc_discovery_url` is required.
-
- - `jwks_ca_cert` (Defaults to `""`) PEM encoded CA cert for use by the TLS client
- used to talk with the JWKS URL. NOTE: Every line must end with a newline
- (`\n`). If not set, system certificates are used.
-
-      - `claim_mappings` (Defaults to `(map[string]string)`) Mappings of claims (key) that
- will be copied to a metadata field (value). Use this if the claim you are capturing
- is singular (such as an attribute).
-
- When mapped, the values can be any of a number, string, or boolean and will
- all be stringified when returned.
-
-      - `list_claim_mappings` (Defaults to `(map[string]string)`) Mappings of claims (key) that
- will be copied to a metadata field (value). Use this if the claim you are capturing
- is list-like (such as groups).
-
- When mapped, the values in each list can be any of a number, string, or
- boolean and will all be stringified when returned.
-
- - `jwt_supported_algs` (Defaults to `["RS256"]`) JWTSupportedAlgs is a list of
- supported signing algorithms.
-
- - `bound_audiences` (Defaults to `[]`) List of `aud` claims that are valid for
- login; any match is sufficient.
-
- - `bound_issuer` (Defaults to `""`) The value against which to match the `iss`
- claim in a JWT.
-
- - `expiration_leeway` (Defaults to `"0s"`) Duration of leeway when
- validating expiration of a token to account for clock skew. Defaults to 150s
- (2.5 minutes) if set to 0s and can be disabled if set to -1ns.
-
- - `not_before_leeway` (Defaults to `"0s"`) Duration of leeway when
- validating not before values of a token to account for clock skew. Defaults
- to 150s (2.5 minutes) if set to 0s and can be disabled if set to -1.
-
- - `clock_skew_leeway` (Defaults to `"0s"`) Duration of leeway when
- validating all claims to account for clock skew. Defaults to 60s (1 minute)
- if set to 0s and can be disabled if set to -1ns.
-
- - `claim_assertions` (Defaults to `[]`) List of assertions about the mapped
- claims required to authorize the incoming RPC request. The syntax uses
- [github.com/hashicorp/go-bexpr](https://github.com/hashicorp/go-bexpr) which is shared with the
- [API filtering feature](/consul/api-docs/features/filtering). For example, the following
- configurations when combined will ensure that the JWT `sub` matches the node
- name requested by the client.
-
-
-
- ```hcl
- claim_mappings {
- sub = "node_name"
- }
- claim_assertions = [
- "value.node_name == \"${node}\""
- ]
- ```
-
- ```json
- {
- "claim_mappings": {
- "sub": "node_name"
- },
- "claim_assertions": ["value.node_name == \"${node}\""]
- }
- ```
-
-
-
- The assertions are lightly templated using [HIL syntax](https://github.com/hashicorp/hil)
- to interpolate some values from the RPC request. The list of variables that can be interpolated
- are:
-
- - `node` - The node name the client agent is requesting.
-
- - `segment` - The network segment name the client is requesting.
-
- - `partition` - The admin partition name the client is requesting.
-
-- `auto_reload_config` Equivalent to the [`-auto-reload-config` command-line flag](/consul/docs/agent/config/cli-flags#_auto_reload_config).
-
-- `bind_addr` Equivalent to the [`-bind` command-line flag](/consul/docs/agent/config/cli-flags#_bind).
-
- This parameter can be set to a go-sockaddr template that resolves to a single
- address. Special characters such as backslashes `\` or double quotes `"`
- within a double quoted string value must be escaped with a backslash `\`.
- Some example templates:
-
-
-
- ```hcl
- bind_addr = "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr \"address\" }}"
- ```
-
- ```json
- {
- "bind_addr": "{{ GetPrivateInterfaces | include \"network\" \"10.0.0.0/8\" | attr \"address\" }}"
- }
- ```
-
-
-
-- `cache` configuration for client agents. When an `?index` query parameter is specified but '?cached' is not appended in a [streaming backend call](/consul/api-docs/features/blocking#streaming-backend), Consul bypasses these configuration values. The configurable values are the following:
-
- - `entry_fetch_max_burst` The size of the token bucket used to recharge the rate-limit per
- cache entry. The default value is 2 and means that when cache has not been updated
- for a long time, 2 successive queries can be made as long as the rate-limit is not
- reached.
-
- - `entry_fetch_rate` configures the rate-limit at which the cache may refresh a single
- entry. On a cluster with many changes/s, watching changes in the cache might put high
- pressure on the servers. This ensures the number of requests for a single cache entry
- will never go beyond this limit, even when a given service changes every 1/100s.
- Since this is a per cache entry limit, having a highly unstable service will only rate
-    limit the watches on this service, but not the other services/entries.
- The value is strictly positive, expressed in queries per second as a float,
-    1 means 1 query per second, 0.1 means 1 request every 10s maximum.
- The default value is "No limit" and should be tuned on large
- clusters to avoid performing too many RPCs on entries changing a lot.
-
-- `check_update_interval` ((#check_update_interval))
- This interval controls how often check output from checks in a steady state is
- synchronized with the server. By default, this is set to 5 minutes ("5m"). Many
- checks which are in a steady state produce slightly different output per run (timestamps,
- etc) which cause constant writes. This configuration allows deferring the sync
- of check output for a given interval to reduce write pressure. If a check ever
- changes state, the new state and associated output is synchronized immediately.
- To disable this behavior, set the value to "0s".
-
-- `client_addr` Equivalent to the [`-client` command-line flag](/consul/docs/agent/config/cli-flags#_client).
-
-- `config_entries` This object allows setting options for centralized config entries.
-
- The following sub-keys are available:
-
- - `bootstrap` ((#config_entries_bootstrap))
- This is a list of inlined config entries to insert into the state store when
- the Consul server gains leadership. This option is only applicable to server
- nodes. Each bootstrap entry will be created only if it does not exist. When reloading,
- any new entries that have been added to the configuration will be processed.
- See the [configuration entry docs](/consul/docs/agent/config-entries) for more
- details about the contents of each entry.
-
-- `datacenter` Equivalent to the [`-datacenter` command-line flag](/consul/docs/agent/config/cli-flags#_datacenter).
-
-- `data_dir` Equivalent to the [`-data-dir` command-line flag](/consul/docs/agent/config/cli-flags#_data_dir).
-
-- `default_intention_policy` Controls how service-to-service traffic is authorized
- in the absence of specific intentions.
- Can be set to `allow`, `deny`, or left empty to default to [`acl.default_policy`](#acl_default_policy).
-
-- `disable_anonymous_signature` Disables providing an anonymous
- signature for de-duplication with the update check. See [`disable_update_check`](#disable_update_check).
-
-- `disable_http_unprintable_char_filter` Defaults to false. Consul 1.0.3 fixed a potential security vulnerability where malicious users could craft KV keys with unprintable chars that would confuse operators using the CLI or UI into taking wrong actions. Users who had data written in older versions of Consul that did not have this restriction will be unable to delete those values by default in 1.0.3 or later. This setting enables those users to **temporarily** disable the filter such that delete operations can work on those keys again to get back to a healthy state. It is strongly recommended that this filter is not disabled permanently as it exposes the original security vulnerability.
-
-- `disable_remote_exec` Disables support for remote execution. When set to true, the agent will ignore
- any incoming remote exec requests. In versions of Consul prior to 0.8, this defaulted
- to false. In Consul 0.8 the default was changed to true, to make remote exec opt-in
- instead of opt-out.
-
-- `disable_update_check` Disables automatic checking for security bulletins and new version releases. This is disabled in Consul Enterprise.
-
-- `discard_check_output` Discards the output of health checks before storing them. This reduces the number of writes to the Consul raft log in environments where health checks have volatile output like timestamps, process ids, ...
-
-- `discovery_max_stale` - Enables stale requests for all service discovery HTTP endpoints. This is
-  equivalent to the [`max_stale`](#max_stale) configuration for DNS requests. If this value is zero (default), all service discovery HTTP endpoints are forwarded to the leader. If this value is greater than zero, any Consul server can handle the service discovery request. If a Consul server is behind the leader by more than `discovery_max_stale`, the query will be re-evaluated on the leader to get more up-to-date results. Consul agents also add a new `X-Consul-Effective-Consistency` response header which indicates if the agent did a stale read. `discovery_max_stale` was introduced in Consul 1.0.7 as a way for Consul operators to force stale requests from clients at the agent level, and defaults to zero which matches default consistency behavior in earlier Consul versions.
-
-- `enable_agent_tls_for_checks` When set, uses a subset of the agent's TLS configuration (`key_file`,
- `cert_file`, `ca_file`, `ca_path`, and `server_name`) to set up the client for HTTP or gRPC health checks. This allows services requiring 2-way TLS to be checked using the agent's credentials. This was added in Consul 1.0.1 and defaults to false.
-
-- `enable_central_service_config` When set, the Consul agent will look for any
- [centralized service configuration](/consul/docs/agent/config-entries)
- that match a registering service instance. If it finds any, the agent will merge the centralized defaults with the service instance configuration. This allows for things like service protocol or proxy configuration to be defined centrally and inherited by any affected service registrations.
- This defaults to `false` in versions of Consul prior to 1.9.0, and defaults to `true` in Consul 1.9.0 and later.
-
-- `enable_debug` (boolean, default is `false`): When set to `true`, enables Consul to report additional debugging information, including runtime profiling (`pprof`) data. This setting is only required for clusters without ACL [enabled](#acl_enabled). If you change this setting, you must restart the agent for the change to take effect.
-
-- `enable_script_checks` Equivalent to the [`-enable-script-checks` command-line flag](/consul/docs/agent/config/cli-flags#_enable_script_checks).
-
- ACLs must be enabled for agents and the `enable_script_checks` option must be set to `true` to enable script checks in Consul 0.9.0 and later. See [Registering and Querying Node Information](/consul/docs/security/acl/acl-rules#registering-and-querying-node-information) for related information.
-
- ~> **Security Warning:** Enabling script checks in some configurations may introduce a known remote execution vulnerability targeted by malware. We strongly recommend `enable_local_script_checks` instead. Refer to the following article for additional guidance: [_Protecting Consul from RCE Risk in Specific Configurations_](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations)
- for more details.
-
-- `enable_local_script_checks` Equivalent to the [`-enable-local-script-checks` command-line flag](/consul/docs/agent/config/cli-flags#_enable_local_script_checks).
-
-- `disable_keyring_file` - Equivalent to the
- [`-disable-keyring-file` command-line flag](/consul/docs/agent/config/cli-flags#_disable_keyring_file).
-
-- `disable_coordinates` - Disables sending of [network coordinates](/consul/docs/architecture/coordinates).
- When network coordinates are disabled the `near` query param will not work to sort the nodes,
- and the [`consul rtt`](/consul/commands/rtt) command will not be able to provide round trip time between nodes.
-
-- `http_config` This object allows setting options for the HTTP API and UI.
-
- The following sub-keys are available:
-
- - `block_endpoints`
- This object is a list of HTTP API endpoint prefixes to block on the agent, and
- defaults to an empty list, meaning all endpoints are enabled. Any endpoint that
- has a common prefix with one of the entries on this list will be blocked and
- will return a 403 response code when accessed. For example, to block all of the
- V1 ACL endpoints, set this to `["/v1/acl"]`, which will block `/v1/acl/create`,
- `/v1/acl/update`, and the other ACL endpoints that begin with `/v1/acl`. This
- only works with API endpoints, not `/ui` or `/debug`, those must be disabled
- with their respective configuration options. Any CLI commands that use disabled
- endpoints will no longer function as well. For more general access control, Consul's
- [ACL system](/consul/tutorials/security/access-control-setup-production)
- should be used, but this option is useful for removing access to HTTP API endpoints
- completely, or on specific agents. This is available in Consul 0.9.0 and later.
-
- - `response_headers` This object allows adding headers to the HTTP API and UI responses. For example, the following config can be used to enable [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) on the HTTP API endpoints:
-
-
-
- ```hcl
- http_config {
- response_headers {
- Access-Control-Allow-Origin = "*"
- }
- }
- ```
-
- ```json
- {
- "http_config": {
- "response_headers": {
- "Access-Control-Allow-Origin": "*"
- }
- }
- }
- ```
-
-
-
- - `allow_write_http_from` This object is a list of networks in CIDR notation (eg "127.0.0.0/8") that are allowed to call the agent write endpoints. It defaults to an empty list, which means all networks are allowed. This is used to make the agent read-only, except for select ip ranges. - To block write calls from anywhere, use `[ "255.255.255.255/32" ]`. - To only allow write calls from localhost, use `[ "127.0.0.0/8" ]` - To only allow specific IPs, use `[ "10.0.0.1/32", "10.0.0.2/32" ]`
-
-  - `use_cache` ((#http_config_use_cache)) Defaults to true. If disabled, the agent won't be using [agent caching](/consul/api-docs/features/caching) to answer the request, even when the url parameter is provided.
-
- - `max_header_bytes` This setting controls the maximum number of bytes the consul http server will read parsing the request header's keys and values, including the request line. It does not limit the size of the request body. If zero, or negative, http.DefaultMaxHeaderBytes is used, which equates to 1 Megabyte.
-
-- `leave_on_terminate` If enabled, when the agent receives a TERM signal, it will send a `Leave` message to the rest of the cluster and gracefully leave. The default behavior for this feature varies based on whether or not the agent is running as a client or a server (prior to Consul 0.7 the default value was unconditionally set to `false`). On agents in client-mode, this defaults to `true` and for agents in server-mode, this defaults to `false`.
-
-- `license_path` This specifies the path to a file that contains the Consul Enterprise license. Alternatively the license may also be specified in either the `CONSUL_LICENSE` or `CONSUL_LICENSE_PATH` environment variables. See the [licensing documentation](/consul/docs/enterprise/license/overview) for more information about Consul Enterprise license management. Added in versions 1.10.0, 1.9.7 and 1.8.13. Prior to version 1.10.0 the value may be set for all agents to facilitate forwards compatibility with 1.10 but will only actually be used by client agents.
-
-- `limits`: This block specifies various types of limits that the Consul server agent enforces.
-
- - `http_max_conns_per_client` - Configures a limit of how many concurrent TCP connections a single client IP address is allowed to open to the agent's HTTP(S) server. This affects the HTTP(S) servers in both client and server agents. Default value is `200`.
- - `https_handshake_timeout` - Configures the limit for how long the HTTPS server in both client and server agents will wait for a client to complete a TLS handshake. This should be kept conservative as it limits how many connections an unauthenticated attacker can open if `verify_incoming` is being used to authenticate clients (strongly recommended in production). Default value is `5s`.
- - `request_limits` - This object specifies configurations that limit the rate of RPC and gRPC requests on the Consul server. Limiting the rate of gRPC and RPC requests also limits HTTP requests to the Consul server.
- - `mode` - String value that specifies an action to take if the rate of requests exceeds the limit. You can specify the following values:
- - `permissive`: The server continues to allow requests and records an error in the logs.
- - `enforcing`: The server stops accepting requests and records an error in the logs.
- - `disabled`: Limits are not enforced or tracked. This is the default value for `mode`.
- - `read_rate` - Integer value that specifies the number of read requests per second. Default is `-1` which represents infinity.
- - `write_rate` - Integer value that specifies the number of write requests per second. Default is `-1` which represents infinity.
- - `rpc_handshake_timeout` - Configures the limit for how long servers will wait after a client TCP connection is established before they complete the connection handshake. When TLS is used, the same timeout applies to the TLS handshake separately from the initial protocol negotiation. All Consul clients should perform this immediately on establishing a new connection. This should be kept conservative as it limits how many connections an unauthenticated attacker can open if `verify_incoming` is being used to authenticate clients (strongly recommended in production). When `verify_incoming` is true on servers, this limits how long the connection socket and associated goroutines will be held open before the client successfully authenticates. Default value is `5s`.
- - `rpc_client_timeout` - Configures the limit for how long a client is allowed to read from an RPC connection. This is used to set an upper bound for calls to eventually terminate so that RPC connections are not held indefinitely. Blocking queries can override this timeout. Default is `60s`.
- - `rpc_max_conns_per_client` - Configures a limit of how many concurrent TCP connections a single source IP address is allowed to open to a single server. It affects both clients connections and other server connections. In general Consul clients multiplex many RPC calls over a single TCP connection so this can typically be kept low. It needs to be more than one though since servers open at least one additional connection for raft RPC, possibly more for WAN federation when using network areas, and snapshot requests from clients run over a separate TCP conn. A reasonably low limit significantly reduces the ability of an unauthenticated attacker to consume unbounded resources by holding open many connections. You may need to increase this if WAN federated servers connect via proxies or NAT gateways or similar causing many legitimate connections from a single source IP. Default value is `100` which is designed to be extremely conservative to limit issues with certain deployment patterns. Most deployments can probably reduce this safely. 100 connections on modern server hardware should not cause a significant impact on resource usage from an unauthenticated attacker though.
- - `rpc_rate` - Configures the RPC rate limiter on Consul _clients_ by setting the maximum request rate that this agent is allowed to make for RPC requests to Consul servers, in requests per second. Defaults to infinite, which disables rate limiting.
- - `rpc_max_burst` - The size of the token bucket used to recharge the RPC rate limiter on Consul _clients_. Defaults to 1000 tokens, and each token is good for a single RPC call to a Consul server. See https://en.wikipedia.org/wiki/Token_bucket for more details about how token bucket rate limiters operate.
- - `kv_max_value_size` - **(Advanced)** Configures the maximum number of bytes for a kv request body to the [`/v1/kv`](/consul/api-docs/kv) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration. This option affects the txn endpoint too, but Consul 1.7.2 introduced `txn_max_req_len` which is the preferred way to set the limit for the txn endpoint. If both limits are set, the higher one takes precedence.
- - `txn_max_req_len` - **(Advanced)** Configures the maximum number of bytes for a transaction request body to the [`/v1/txn`](/consul/api-docs/txn) endpoint. This limit defaults to [raft's](https://github.com/hashicorp/raft) suggested max size (512KB). **Note that tuning these improperly can cause Consul to fail in unexpected ways**, it may potentially affect leadership stability and prevent timely heartbeat signals by increasing RPC IO duration.
-
-- `default_query_time` Equivalent to the [`-default-query-time` command-line flag](/consul/docs/agent/config/cli-flags#_default_query_time).
-
-- `max_query_time` Equivalent to the [`-max-query-time` command-line flag](/consul/docs/agent/config/cli-flags#_max_query_time).
-
-- `peering` This object allows setting options for cluster peering.
-
- The following sub-keys are available:
-
- - `enabled` ((#peering_enabled)) (Defaults to `true`) Controls whether cluster peering is enabled.
- When disabled, the UI won't show peering, all peering APIs will return
- an error, any peerings stored in Consul already will be ignored (but they will not be deleted),
- and all peering connections from other clusters will be rejected. This was added in Consul 1.13.0.
-
-- `partition` - This flag is used to set
- the name of the admin partition the agent belongs to. An agent can only join
- and communicate with other agents within its admin partition. Review the
- [Admin Partitions documentation](/consul/docs/enterprise/admin-partitions) for more
- details. By default, this is an empty string, which is the `default` admin
- partition. This cannot be set on a server agent.
-
- ~> **Warning:** The `partition` option cannot be used with either the
- [`segment`](#segment-2) option or [`-segment`](/consul/docs/agent/config/cli-flags#_segment) flag.
-
-- `performance` Available in Consul 0.7 and later, this is a nested object that allows tuning the performance of different subsystems in Consul. See the [Server Performance](/consul/docs/install/performance) documentation for more details. The following parameters are available:
-
- - `leave_drain_time` - A duration that a server will dwell during a graceful leave in order to allow requests to be retried against other Consul servers. Under normal circumstances, this can prevent clients from experiencing "no leader" errors when performing a rolling update of the Consul servers. This was added in Consul 1.0. Must be a duration value such as 10s. Defaults to 5s.
-
- - `raft_multiplier` - An integer multiplier used by Consul servers to scale key Raft timing parameters. Omitting this value or setting it to 0 uses default timing described below. Lower values are used to tighten timing and increase sensitivity while higher values relax timings and reduce sensitivity. Tuning this affects the time it takes Consul to detect leader failures and to perform leader elections, at the expense of requiring more network and CPU resources for better performance.
-
- By default, Consul will use a lower-performance timing that's suitable
- for [minimal Consul servers](/consul/docs/install/performance#minimum), currently equivalent
- to setting this to a value of 5 (this default may be changed in future versions of Consul,
- depending if the target minimum server profile changes). Setting this to a value of 1 will
- configure Raft to its highest-performance mode, equivalent to the default timing of Consul
- prior to 0.7, and is recommended for [production Consul servers](/consul/docs/install/performance#production).
-
- See the note on [last contact](/consul/docs/install/performance#production-server-requirements) timing for more
- details on tuning this parameter. The maximum allowed value is 10.
-
- - `rpc_hold_timeout` - A duration that a client
- or server will retry internal RPC requests during leader elections. Under normal
- circumstances, this can prevent clients from experiencing "no leader" errors.
- This was added in Consul 1.0. Must be a duration value such as 10s. Defaults
- to 7s.
-
- - `grpc_keepalive_interval` - A duration that determines the frequency that Consul servers send keep-alive messages to inactive gRPC clients. Configure this setting to modify how quickly Consul detects and removes improperly closed xDS or peering connections. Default is `30s`.
-
- - `grpc_keepalive_timeout` - A duration that determines how long a Consul server waits for a reply to a keep-alive message. If the server does not receive a reply before the end of the duration, Consul flags the gRPC connection as unhealthy and forcibly removes it. Defaults to `20s`.
-
-- `pid_file` Equivalent to the [`-pid-file` command line flag](/consul/docs/agent/config/cli-flags#_pid_file).
-
-- `ports` This is a nested object that allows setting the bind ports for the following keys:
-
- - `dns` ((#dns_port)) - The DNS server, -1 to disable. Default 8600.
- TCP and UDP.
- - `http` ((#http_port)) - The HTTP API, -1 to disable. Default 8500.
- TCP only.
- - `https` ((#https_port)) - The HTTPS API, -1 to disable. Default -1
- (disabled). **We recommend using `8501`** for `https` by convention as some tooling
- will work automatically with this.
- - `grpc` ((#grpc_port)) - The gRPC API, -1 to disable. Default -1 (disabled).
- **We recommend using `8502` for `grpc`** as your conventional gRPC port number, as it allows some
- tools to work automatically. This parameter is set to `8502` by default when the agent runs
- in `-dev` mode. The `grpc` port only supports plaintext traffic starting in Consul 1.14.
- Refer to `grpc_tls` for more information on configuring a TLS-enabled port.
- - `grpc_tls` ((#grpc_tls_port)) - The gRPC API with TLS connections, -1 to disable. gRPC_TLS is enabled by default on port 8503 for Consul servers.
- **We recommend using `8503` for `grpc_tls`** as your conventional gRPC port number, as it allows some
- tools to work automatically. `grpc_tls` is always guaranteed to be encrypted. Both `grpc` and `grpc_tls`
- can be configured at the same time, but they may not utilize the same port number. This field was added in Consul 1.14.
- - `serf_lan` ((#serf_lan_port)) - The Serf LAN port. Default 8301. TCP
- and UDP. Equivalent to the [`-serf-lan-port` command line flag](/consul/docs/agent/config/cli-flags#_serf_lan_port).
- - `serf_wan` ((#serf_wan_port)) - The Serf WAN port. Default 8302.
- Equivalent to the [`-serf-wan-port` command line flag](/consul/docs/agent/config/cli-flags#_serf_wan_port). Set
- to -1 to disable. **Note**: this will disable WAN federation which is not recommended.
- Various catalog and WAN related endpoints will return errors or empty results.
- TCP and UDP.
- - `server` ((#server_rpc_port)) - Server RPC address. Default 8300. TCP
- only.
- - `sidecar_min_port` ((#sidecar_min_port)) - Inclusive minimum port number
- to use for automatically assigned [sidecar service registrations](/consul/docs/connect/proxies/deploy-sidecar-services).
- Default 21000. Set to `0` to disable automatic port assignment.
- - `sidecar_max_port` ((#sidecar_max_port)) - Inclusive maximum port number
- to use for automatically assigned [sidecar service registrations](/consul/docs/connect/proxies/deploy-sidecar-services).
- Default 21255. Set to `0` to disable automatic port assignment.
- - `expose_min_port` ((#expose_min_port)) - Inclusive minimum port number
- to use for automatically assigned [exposed check listeners](/consul/docs/connect/proxies/proxy-config-reference#expose-paths-configuration-reference).
- Default 21500. Set to `0` to disable automatic port assignment.
- - `expose_max_port` ((#expose_max_port)) - Inclusive maximum port number
- to use for automatically assigned [exposed check listeners](/consul/docs/connect/proxies/proxy-config-reference#expose-paths-configuration-reference).
- Default 21755. Set to `0` to disable automatic port assignment.
-
-- `primary_datacenter` - This designates the datacenter
- which is authoritative for ACL information, intentions and is the root Certificate
- Authority for service mesh. It must be provided to enable ACLs. All servers and datacenters
- must agree on the primary datacenter. Setting it on the servers is all you need
- for cluster-level enforcement, but for the APIs to forward properly from the clients,
- it must be set on them too. In Consul 0.8 and later, this also enables agent-level
- enforcement of ACLs.
-
-- `primary_gateways` Equivalent to the [`-primary-gateway`
- command-line flag](/consul/docs/agent/config/cli-flags#_primary_gateway). Takes a list of addresses to use as the
- mesh gateways for the primary datacenter when authoritative replicated catalog
- data is not present. Discovery happens every [`primary_gateways_interval`](#primary_gateways_interval)
- until at least one primary mesh gateway is discovered. This was added in Consul
- 1.8.0.
-
-- `primary_gateways_interval` Time to wait
- between [`primary_gateways`](#primary_gateways) discovery attempts. Defaults to
- 30s. This was added in Consul 1.8.0.
-
-- `protocol` ((#protocol)) Equivalent to the [`-protocol` command-line
- flag](/consul/docs/agent/config/cli-flags#_protocol).
-
-- `reap` This controls Consul's automatic reaping of child processes,
- which is useful if Consul is running as PID 1 in a Docker container. If this isn't
- specified, then Consul will automatically reap child processes if it detects it
- is running as PID 1. If this is set to true or false, then it controls reaping
- regardless of Consul's PID (forces reaping on or off, respectively). This option
- was removed in Consul 0.7.1. For later versions of Consul, you will need to reap
- processes using a wrapper, please see the [Consul Docker image entry point script](https://github.com/hashicorp/docker-consul/blob/master/0.X/docker-entrypoint.sh)
- for an example. If you are using Docker 1.13.0 or later, you can use the new `--init`
- option of the `docker run` command and docker will enable an init process with
- PID 1 that reaps child processes for the container. More info on [Docker docs](https://docs.docker.com/engine/reference/commandline/run/#options).
-
-- `reconnect_timeout` This controls how long it
- takes for a failed node to be completely removed from the cluster. This defaults
- to 72 hours and it is recommended that this is set to at least double the maximum
- expected recoverable outage time for a node or network partition. WARNING: Setting
- this time too low could cause Consul servers to be removed from quorum during an
- extended node failure or partition, which could complicate recovery of the cluster.
- The value is a time with a unit suffix, which can be "s", "m", "h" for seconds,
- minutes, or hours. The value must be >= 8 hours.
-
-- `reconnect_timeout_wan` This is the WAN equivalent
- of the [`reconnect_timeout`](#reconnect_timeout) parameter, which controls
- how long it takes for a failed server to be completely removed from the WAN pool.
- This also defaults to 72 hours, and must be >= 8 hours.
-
-- `recursors` This flag provides addresses of upstream DNS
- servers that are used to recursively resolve queries if they are not inside the
- service domain for Consul. For example, a node can use Consul directly as a DNS
- server, and if the record is outside of the "consul." domain, the query will be
- resolved upstream. As of Consul 1.0.1 recursors can be provided as IP addresses
- or as go-sockaddr templates. IP addresses are resolved in order, and duplicates
- are ignored.
-
-- `rpc` configuration for Consul servers.
-
- - `enable_streaming` ((#rpc_enable_streaming)) defaults to true. If set to false it will disable
- the gRPC subscribe endpoint on a Consul Server. All
- servers in all federated datacenters must have this enabled before any client can use
- [`use_streaming_backend`](#use_streaming_backend).
-
-- `reporting` - This option allows options for HashiCorp reporting.
- - `license` - The license object allows users to control automatic reporting of license utilization metrics to HashiCorp.
- - `enabled`: (Defaults to `true`) Enables automatic license utilization reporting.
-
-- `segment` - Equivalent to the [`-segment` command-line flag](/consul/docs/agent/config/cli-flags#_segment).
-
- ~> **Warning:** The `segment` option cannot be used with the [`partition`](#partition-1) option.
-
-- `segments` - (Server agents only) This is a list of nested objects
- that specifies user-defined network segments, not including the `<default>` segment, which is
- created automatically. Refer to the [network segments documentation](/consul/docs/enterprise/network-segments/create-network-segment) for additional information.
-
- - `name` ((#segment_name)) - The name of the segment. Must be a string
- between 1 and 64 characters in length.
- - `bind` ((#segment_bind)) - The bind address to use for the segment's
- gossip layer. Defaults to the [`-bind`](#_bind) value if not provided.
- - `port` ((#segment_port)) - The port to use for the segment's gossip
- layer (required).
- - `advertise` ((#segment_advertise)) - The advertise address to use for
- the segment's gossip layer. Defaults to the [`-advertise`](/consul/docs/agent/config/cli-flags#_advertise) value
- if not provided.
- - `rpc_listener` ((#segment_rpc_listener)) - If true, a separate RPC
- listener will be started on this segment's [`-bind`](/consul/docs/agent/config/cli-flags#_bind) address on the rpc
- port. Only valid if the segment's bind address differs from the [`-bind`](/consul/docs/agent/config/cli-flags#_bind)
- address. Defaults to false.
-
-- `server` Equivalent to the [`-server` command-line flag](/consul/docs/agent/config/cli-flags#_server).
-
-- `server_rejoin_age_max` - controls the allowed maximum age of a stale server attempting to rejoin a cluster.
- If the server has not run during this period, it will refuse to start up again until an operator intervenes by manually deleting the `server_metadata.json`
- file located in the data dir.
- This is to protect clusters from instability caused by decommissioned servers accidentally being started again.
- Note: the default value is 168h (equal to 7d) and the minimum value is 6h.
-
-- `non_voting_server` - **This field is deprecated in Consul 1.9.1. See the [`read_replica`](#read_replica) field instead.**
-
-- `read_replica` - Equivalent to the [`-read-replica` command-line flag](/consul/docs/agent/config/cli-flags#_read_replica).
-
-- `session_ttl_min` The minimum allowed session TTL. This ensures sessions are not created with TTLs
- shorter than the specified limit. It is recommended to keep this limit at or above
- the default to encourage clients to send infrequent heartbeats. Defaults to 10s.
-
-- `skip_leave_on_interrupt` This is similar
- to [`leave_on_terminate`](#leave_on_terminate) but only affects interrupt handling.
- When Consul receives an interrupt signal (such as hitting Control-C in a terminal),
- Consul will gracefully leave the cluster. Setting this to `true` disables that
- behavior. The default behavior for this feature varies based on whether or not
- the agent is running as a client or a server (prior to Consul 0.7 the default value
- was unconditionally set to `false`). On agents in client-mode, this defaults to
- `false` and for agents in server-mode, this defaults to `true` (i.e. Ctrl-C on
- a server will keep the server in the cluster and therefore quorum, and Ctrl-C on
- a client will gracefully leave).
-
-- `translate_wan_addrs` If set to true, Consul
- will prefer a node's configured [WAN address](/consul/docs/agent/config/cli-flags#_advertise-wan)
- when servicing DNS and HTTP requests for a node in a remote datacenter. This allows
- the node to be reached within its own datacenter using its local address, and reached
- from other datacenters using its WAN address, which is useful in hybrid setups
- with mixed networks. This is disabled by default.
-
- Starting in Consul 0.7 and later, node addresses in responses to HTTP requests will also prefer a
- node's configured [WAN address](/consul/docs/agent/config/cli-flags#_advertise-wan) when querying for a node in a remote
- datacenter. An [`X-Consul-Translate-Addresses`](/consul/api-docs/api-structure#translated-addresses) header
- will be present on all responses when translation is enabled to help clients know that the addresses
- may be translated. The `TaggedAddresses` field in responses also have a `lan` address for clients that
- need knowledge of that address, regardless of translation.
-
- The following endpoints translate addresses:
-
- - [`/v1/catalog/nodes`](/consul/api-docs/catalog#list-nodes)
- - [`/v1/catalog/node/`](/consul/api-docs/catalog#retrieve-map-of-services-for-a-node)
- - [`/v1/catalog/service/`](/consul/api-docs/catalog#list-nodes-for-service)
- - [`/v1/health/service/`](/consul/api-docs/health#list-nodes-for-service)
- - [`/v1/query//execute`](/consul/api-docs/query#execute-prepared-query)
-
-- `unix_sockets` - This allows tuning the ownership and
- permissions of the Unix domain socket files created by Consul. Domain sockets are
- only used if the HTTP address is configured with the `unix://` prefix.
-
- It is important to note that this option may have different effects on
- different operating systems. Linux generally observes socket file permissions
- while many BSD variants ignore permissions on the socket file itself. It is
- important to test this feature on your specific distribution. This feature is
- currently not functional on Windows hosts.
-
- The following options are valid within this construct and apply globally to all
- sockets created by Consul:
-
- - `user` - The name or ID of the user who will own the socket file.
- - `group` - The group ID ownership of the socket file. This option
- currently only supports numeric IDs.
- - `mode` - The permission bits to set on the file.
-
-- `use_streaming_backend` defaults to true. When enabled Consul client agents will use
- streaming rpc, instead of the traditional blocking queries, for endpoints which support
- streaming. All servers must have [`rpc.enable_streaming`](#rpc_enable_streaming)
- enabled before any client can enable `use_streaming_backend`.
-
-- `watches` - Watches is a list of watch specifications which
- allow an external process to be automatically invoked when a particular data view
- is updated. See the [watch documentation](/consul/docs/dynamic-app-config/watches) for more detail.
- Watches can be modified when the configuration is reloaded.
-
-## ACL Parameters
-
-- `acl` ((#acl)) - This object allows a number of sub-keys to be set which
- controls the ACL system. Configuring the ACL system within the ACL stanza was added
- in Consul 1.4.0
-
- The following sub-keys are available:
-
- - `enabled` ((#acl_enabled)) - Enables ACLs.
-
- - `policy_ttl` ((#acl_policy_ttl)) - Used to control Time-To-Live caching
- of ACL policies. By default, this is 30 seconds. This setting has a major performance
- impact: reducing it will cause more frequent refreshes while increasing it reduces
- the number of refreshes. However, because the caches are not actively invalidated,
- ACL policy may be stale up to the TTL value.
-
- - `role_ttl` ((#acl_role_ttl)) - Used to control Time-To-Live caching
- of ACL roles. By default, this is 30 seconds. This setting has a major performance
- impact: reducing it will cause more frequent refreshes while increasing it reduces
- the number of refreshes. However, because the caches are not actively invalidated,
- ACL role may be stale up to the TTL value.
-
- - `token_ttl` ((#acl_token_ttl)) - Used to control Time-To-Live caching
- of ACL tokens. By default, this is 30 seconds. This setting has a major performance
- impact: reducing it will cause more frequent refreshes while increasing it reduces
- the number of refreshes. However, because the caches are not actively invalidated,
- ACL token may be stale up to the TTL value.
-
- - `down_policy` ((#acl_down_policy)) - Either "allow", "deny", "extend-cache"
- or "async-cache"; "extend-cache" is the default. In the case that a policy or
- token cannot be read from the [`primary_datacenter`](#primary_datacenter) or
- leader node, the down policy is applied. In "allow" mode, all actions are permitted,
- "deny" restricts all operations, and "extend-cache" allows any cached objects
- to be used, ignoring the expiry time of the cached entry. If the request uses an
- ACL that is not in the cache, "extend-cache" falls back to the behavior of
- `default_policy`.
- The value "async-cache" acts the same way as "extend-cache"
- but performs updates asynchronously when ACL is present but its TTL is expired,
- thus, if latency is bad between the primary and secondary datacenters, latency
- of operations is not impacted.
-
- - `default_policy` ((#acl_default_policy)) - Either "allow" or "deny";
- defaults to "allow" but this will be changed in a future major release. The default
- policy controls the behavior of a token when there is no matching rule. In "allow"
- mode, ACLs are a denylist: any operation not specifically prohibited is allowed.
- In "deny" mode, ACLs are an allowlist: any operation not specifically
- allowed is blocked. **Note**: this will not take effect until you've enabled ACLs.
-
- - `enable_key_list_policy` ((#acl_enable_key_list_policy)) - Boolean value, defaults to false.
- When true, the `list` permission will be required on the prefix being recursively read from the KV store.
- Regardless of being enabled, the full set of KV entries under the prefix will be filtered
- to remove any entries that the request's ACL token does not grant at least read
- permissions. This option is only available in Consul 1.0 and newer.
-
- - `enable_token_replication` ((#acl_enable_token_replication)) - By default
- secondary Consul datacenters will perform replication of only ACL policies and
- roles. Setting this configuration will enable ACL token replication and
- allow for the creation of both [local tokens](/consul/api-docs/acl/tokens#local) and
- [auth methods](/consul/docs/security/acl/auth-methods) in connected secondary datacenters.
-
- ~> **Warning:** When enabling ACL token replication on the secondary datacenter,
- global tokens already present in the secondary datacenter will be lost. For
- production environments, consider configuring ACL replication in your initial
- datacenter bootstrapping process.
-
- - `enable_token_persistence` ((#acl_enable_token_persistence)) - Either
- `true` or `false`. When `true` tokens set using the API will be persisted to
- disk and reloaded when an agent restarts.
-
- - `tokens` ((#acl_tokens)) - This object holds all of the configured
- ACL tokens for the agent's usage.
-
- - `initial_management` ((#acl_tokens_initial_management)) - This is available in
- Consul 1.11 and later. In prior versions, use [`acl.tokens.master`](#acl_tokens_master).
-
- Only used for servers in the [`primary_datacenter`](#primary_datacenter).
- This token will be created with management-level permissions if it does not exist.
- It allows operators to bootstrap the ACL system with a token Secret ID that is
- well-known.
-
- The `initial_management` token is only installed when a server acquires cluster
- leadership. If you would like to install or change it, set the new value for
- `initial_management` in the configuration for all servers. Once this is done,
- restart the current leader to force a leader election. If the `initial_management`
- token is not supplied, then the servers do not create an initial management token.
- When you provide a value, it should be a UUID. To maintain backwards compatibility
- and an upgrade path this restriction is not currently enforced but will be in a
- future major Consul release.
-
- - `master` ((#acl_tokens_master)) **Renamed in Consul 1.11 to
- [`acl.tokens.initial_management`](#acl_tokens_initial_management).**
-
- - `default` ((#acl_tokens_default)) - When provided, this agent will
- use this token by default when making requests to the Consul servers
- instead of the [anonymous token](/consul/docs/security/acl/tokens#anonymous-token).
- Consul HTTP API requests can provide an alternate token in their authorization header
- to override the `default` or anonymous token on a per-request basis,
- as described in [HTTP API Authentication](/consul/api-docs/api-structure#authentication).
-
- - `agent` ((#acl_tokens_agent)) - Used for clients and servers to perform
- internal operations. If this isn't specified, then the
- [`default`](#acl_tokens_default) will be used.
-
- This token must at least have write access to the node name it will
- register as in order to set any of the node-level information in the
- catalog such as metadata, or the node's tagged addresses.
-
- - `agent_recovery` ((#acl_tokens_agent_recovery)) - This is available in Consul 1.11
- and later. In prior versions, use [`acl.tokens.agent_master`](#acl_tokens_agent_master).
-
- Used to access [agent endpoints](/consul/api-docs/agent) that require agent read or write privileges,
- or node read privileges, even if Consul servers aren't present to validate any tokens.
- This should only be used by operators during outages, regular ACL tokens should normally
- be used by applications.
-
- - `agent_master` ((#acl_tokens_agent_master)) **Renamed in Consul 1.11 to
- [`acl.tokens.agent_recovery`](#acl_tokens_agent_recovery).**
-
- - `config_file_service_registration` ((#acl_tokens_config_file_service_registration)) - Specifies the ACL
- token the agent uses to register services and checks from [service](/consul/docs/services/usage/define-services) and [check](/consul/docs/services/usage/checks) definitions
- specified in configuration files or fragments passed to the agent using the `-hcl`
- flag.
-
- If the `token` field is defined in the service or check definition, then that token is used to
- register the service or check instead. If the `config_file_service_registration` token is not
- defined and if the `token` field is not defined in the service or check definition, then the
- agent uses the [`default`](#acl_tokens_default) token to register the service or check.
-
- This token needs write permission to register all services and checks defined in this agent's
- configuration. For example, if there are two service definitions in the agent's configuration
- files for services "A" and "B", then the token needs `service:write` permissions for both
- services "A" and "B" in order to successfully register both services. If the token is missing
- `service:write` permissions for service "B", the agent will successfully register service "A"
- and fail to register service "B". Failed registration requests are eventually retried as part
- of [anti-entropy enforcement](/consul/docs/architecture/anti-entropy). If a registration request is
- failing due to missing permissions, the token for this agent can be updated with
- additional policy rules or the `config_file_service_registration` token can be replaced using
- the [Set Agent Token](/consul/commands/acl/set-agent-token) CLI command.
-
- - `dns` ((#acl_tokens_dns)) - Specifies the token that agents use to request information needed to respond to DNS queries.
- If the `dns` token is not set, the `default` token is used instead.
- Because the `default` token allows unauthenticated HTTP API access to list nodes and services, we
- strongly recommend using the `dns` token. Create DNS tokens using the [templated policy](/consul/docs/security/acl/tokens/create/create-a-dns-token#create_a_dns_token)
- option to ensure that the token has the permissions needed to respond to all DNS queries.
-
- - `replication` ((#acl_tokens_replication)) - Specifies the token that the agent uses to
- authorize secondary datacenters with the primary datacenter for replication
- operations. This token is required for servers outside the [`primary_datacenter`](#primary_datacenter) when ACLs are enabled. This token may be provided later using the [agent token API](/consul/api-docs/agent#update-acl-tokens) on each server. This token must have at least "read" permissions on ACL data but if ACL token replication is enabled then it must have "write" permissions. This also enables service mesh data replication, for which the token will require both operator "write" and intention "read" permissions for replicating CA and Intention data.
-
- ~> **Warning:** When enabling ACL token replication on the secondary datacenter,
- policies and roles already present in the secondary datacenter will be lost. For
- production environments, consider configuring ACL replication in your initial
- datacenter bootstrapping process.
-
- - `managed_service_provider` ((#acl_tokens_managed_service_provider)) - An
- array of ACL tokens used by Consul managed service providers for cluster operations.
-
-
-
- ```hcl
- managed_service_provider {
- accessor_id = "ed22003b-0832-4e48-ac65-31de64e5c2ff"
- secret_id = "cb6be010-bba8-4f30-a9ed-d347128dde17"
- }
- ```
-
- ```json
- "managed_service_provider": [
- {
- "accessor_id": "ed22003b-0832-4e48-ac65-31de64e5c2ff",
- "secret_id": "cb6be010-bba8-4f30-a9ed-d347128dde17"
- }
- ]
- ```
-
-
-
-- `acl_datacenter` - **This field is deprecated in Consul 1.4.0. See the [`primary_datacenter`](#primary_datacenter) field instead.**
-
- This designates the datacenter which is authoritative for ACL information. It must be provided to enable ACLs. All servers and datacenters must agree on the ACL datacenter. Setting it on the servers is all you need for cluster-level enforcement, but for the APIs to forward properly from the clients,
- it must be set on them too. In Consul 0.8 and later, this also enables agent-level enforcement
- of ACLs. Please review the [ACL tutorial](/consul/tutorials/security/access-control-setup-production) for more details.
-
-- `acl_default_policy` ((#acl_default_policy_legacy)) - **Deprecated in Consul 1.4.0. See the [`acl.default_policy`](#acl_default_policy) field instead.**
- Either "allow" or "deny"; defaults to "allow". The default policy controls the
- behavior of a token when there is no matching rule. In "allow" mode, ACLs are a
- denylist: any operation not specifically prohibited is allowed. In "deny" mode,
- ACLs are an allowlist: any operation not specifically allowed is blocked. **Note**:
- this will not take effect until you've set `primary_datacenter` to enable ACL support.
-
-- `acl_down_policy` ((#acl_down_policy_legacy)) - **Deprecated in Consul
- 1.4.0. See the [`acl.down_policy`](#acl_down_policy) field instead.** Either "allow",
- "deny", "extend-cache" or "async-cache"; "extend-cache" is the default. In the
- case that the policy for a token cannot be read from the [`primary_datacenter`](#primary_datacenter)
- or leader node, the down policy is applied. In "allow" mode, all actions are permitted,
- "deny" restricts all operations, and "extend-cache" allows any cached ACLs to be
- used, ignoring their TTL values. If a non-cached ACL is used, "extend-cache" acts
- like "deny". The value "async-cache" acts the same way as "extend-cache" but performs
- updates asynchronously when ACL is present but its TTL is expired, thus, if latency
- is bad between ACL authoritative and other datacenters, latency of operations is
- not impacted.
-
-- `acl_agent_master_token` ((#acl_agent_master_token_legacy)) - **Deprecated
- in Consul 1.4.0. See the [`acl.tokens.agent_master`](#acl_tokens_agent_master)
- field instead.** Used to access [agent endpoints](/consul/api-docs/agent) that
- require agent read or write privileges, or node read privileges, even if Consul
-  servers aren't present to validate any tokens. This should only be used by operators
-  during outages; regular ACL tokens should normally be used by applications. This
- was added in Consul 0.7.2 and is only used when [`acl_enforce_version_8`](#acl_enforce_version_8) is set to true.
-
-- `acl_agent_token` ((#acl_agent_token_legacy)) - **Deprecated in Consul
- 1.4.0. See the [`acl.tokens.agent`](#acl_tokens_agent) field instead.** Used for
- clients and servers to perform internal operations. If this isn't specified, then
- the [`acl_token`](#acl_token) will be used. This was added in Consul 0.7.2.
-
- This token must at least have write access to the node name it will register as in order to set any
- of the node-level information in the catalog such as metadata, or the node's tagged addresses.
-
-- `acl_enforce_version_8` - **Deprecated in
- Consul 1.4.0 and removed in 1.8.0.** Used for clients and servers to determine if enforcement should
- occur for new ACL policies being previewed before Consul 0.8. Added in Consul 0.7.2,
- this defaults to false in versions of Consul prior to 0.8, and defaults to true
- in Consul 0.8 and later. This helps ease the transition to the new ACL features
- by allowing policies to be in place before enforcement begins.
-
-- `acl_master_token` ((#acl_master_token_legacy)) - **Deprecated in Consul
- 1.4.0. See the [`acl.tokens.master`](#acl_tokens_master) field instead.**
-
-- `acl_replication_token` ((#acl_replication_token_legacy)) - **Deprecated
- in Consul 1.4.0. See the [`acl.tokens.replication`](#acl_tokens_replication) field
- instead.** Only used for servers outside the [`primary_datacenter`](#primary_datacenter)
- running Consul 0.7 or later. When provided, this will enable [ACL replication](/consul/tutorials/security-operations/access-control-replication-multiple-datacenters)
-  using this token to retrieve and replicate the ACLs
- to the non-authoritative local datacenter. In Consul 0.9.1 and later you can enable
- ACL replication using [`acl.enable_token_replication`](#acl_enable_token_replication) and then
- set the token later using the [agent token API](/consul/api-docs/agent#update-acl-tokens)
- on each server. If the `acl_replication_token` is set in the config, it will automatically
- set [`acl.enable_token_replication`](#acl_enable_token_replication) to true for backward compatibility.
-
- If there's a partition or other outage affecting the authoritative datacenter, and the
- [`acl_down_policy`](/consul/docs/agent/config/config-files#acl_down_policy) is set to "extend-cache", tokens not
- in the cache can be resolved during the outage using the replicated set of ACLs.
-
-- `acl_token` ((#acl_token_legacy)) - **Deprecated in Consul 1.4.0. See
- the [`acl.tokens.default`](#acl_tokens_default) field instead.**
-
-- `acl_ttl` ((#acl_ttl_legacy)) - **Deprecated in Consul 1.4.0. See the
-  [`acl.token_ttl`](#acl_token_ttl) field instead.** Used to control Time-To-Live
- caching of ACLs. By default, this is 30 seconds. This setting has a major performance
- impact: reducing it will cause more frequent refreshes while increasing it reduces
- the number of refreshes. However, because the caches are not actively invalidated,
- ACL policy may be stale up to the TTL value.
-
-- `enable_acl_replication` **Deprecated in Consul 1.11. Use the [`acl.enable_token_replication`](#acl_enable_token_replication) field instead.**
- When set on a Consul server, enables ACL replication without having to set
- the replication token via [`acl_replication_token`](#acl_replication_token). Instead, enable ACL replication
- and then introduce the token using the [agent token API](/consul/api-docs/agent#update-acl-tokens) on each server.
- See [`acl_replication_token`](#acl_replication_token) for more details.
-
- ~> **Warning:** When enabling ACL token replication on the secondary datacenter,
- policies and roles already present in the secondary datacenter will be lost. For
- production environments, consider configuring ACL replication in your initial
- datacenter bootstrapping process.
-
-## Advertise Address Parameters
-
-- `advertise_addr` Equivalent to the [`-advertise` command-line flag](/consul/docs/agent/config/cli-flags#_advertise).
-
-- `advertise_addr_ipv4` This was added together with [`advertise_addr_ipv6`](#advertise_addr_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery.
-
-- `advertise_addr_ipv6` This was added together with [`advertise_addr_ipv4`](#advertise_addr_ipv4) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery.
-
-- `advertise_addr_wan` Equivalent to the [`-advertise-wan` command-line flag](/consul/docs/agent/config/cli-flags#_advertise-wan).
-
-- `advertise_addr_wan_ipv4` This was added together with [`advertise_addr_wan_ipv6`](#advertise_addr_wan_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery.
-
-- `advertise_addr_wan_ipv6` This was added together with [`advertise_addr_wan_ipv4`](#advertise_addr_wan_ipv4) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery.
-
-- `advertise_reconnect_timeout` This is a per-agent setting of the [`reconnect_timeout`](#reconnect_timeout) parameter.
- This agent will advertise to all other nodes in the cluster that after this timeout, the node may be completely
- removed from the cluster. This may only be set on client agents and if unset then other nodes will use the main
- `reconnect_timeout` setting when determining when this node may be removed from the cluster.
-
-## Bootstrap Parameters
-
-- `bootstrap` Equivalent to the [`-bootstrap` command-line flag](/consul/docs/agent/config/cli-flags#_bootstrap).
-
-- `bootstrap_expect` Equivalent to the [`-bootstrap-expect` command-line flag](/consul/docs/agent/config/cli-flags#_bootstrap_expect).
-
-## Self-managed HCP Parameters
-
-- `cloud` This object specifies settings for connecting self-managed clusters to HCP. This was added in Consul 1.14.
-
- - `client_id` The OAuth2 client ID for authentication with HCP. This can be overridden using the `HCP_CLIENT_ID` environment variable.
-
- - `client_secret` The OAuth2 client secret for authentication with HCP. This can be overridden using the `HCP_CLIENT_SECRET` environment variable.
-
- - `resource_id` The HCP resource identifier. This can be overridden using the `HCP_RESOURCE_ID` environment variable.
-
-## Service Mesh Parameters ((#connect-parameters))
-
-The noun _connect_ is used throughout this documentation to refer to the connect
-subsystem that provides Consul's service mesh capabilities.
-
-- `connect` This object allows setting options for the Connect feature.
-
- The following sub-keys are available:
-
- - `enabled` ((#connect_enabled)) (Defaults to `true`) Controls whether Connect features are
- enabled on this agent. Should be enabled on all servers in the cluster
- in order for service mesh to function properly.
- Will be set to `true` automatically if `auto_config.enabled` or `auto_encrypt.allow_tls` is `true`.
-
- - `enable_mesh_gateway_wan_federation` ((#connect_enable_mesh_gateway_wan_federation)) (Defaults to `false`) Controls whether cross-datacenter federation traffic between servers is funneled
- through mesh gateways. This was added in Consul 1.8.0.
-
- - `ca_provider` ((#connect_ca_provider)) Controls which CA provider to
- use for the service mesh's CA. Currently only the `aws-pca`, `consul`, and `vault` providers are supported.
- This is only used when initially bootstrapping the cluster. For an existing cluster,
- use the [Update CA Configuration Endpoint](/consul/api-docs/connect/ca#update-ca-configuration).
-
- - `ca_config` ((#connect_ca_config)) An object which allows setting different
- config options based on the CA provider chosen. This is only used when initially
- bootstrapping the cluster. For an existing cluster, use the [Update CA Configuration
- Endpoint](/consul/api-docs/connect/ca#update-ca-configuration).
-
- The following providers are supported:
-
- #### AWS ACM Private CA Provider (`ca_provider = "aws-pca"`)
-
- - `existing_arn` ((#aws_ca_existing_arn)) The Amazon Resource Name (ARN) of
- an existing private CA in your ACM account. If specified, Consul will
- attempt to use the existing CA to issue certificates.
-
- #### Consul CA Provider (`ca_provider = "consul"`)
-
- - `private_key` ((#consul_ca_private_key)) The PEM contents of the
- private key to use for the CA.
-
- - `root_cert` ((#consul_ca_root_cert)) The PEM contents of the root
- certificate to use for the CA.
-
- #### Vault CA Provider (`ca_provider = "vault"`)
-
- - `address` ((#vault_ca_address)) The address of the Vault server to
- connect to.
-
- - `token` ((#vault_ca_token)) The Vault token to use. In Consul 1.8.5 and later, if
- the token has the [renewable](/vault/api-docs/auth/token#renewable)
- flag set, Consul will attempt to renew its lease periodically after half the
- duration has expired.
-
- - `root_pki_path` ((#vault_ca_root_pki)) The path to use for the root
- CA pki backend in Vault. This can be an existing backend with a CA already
- configured, or a blank/unmounted backend in which case Consul will automatically
- mount/generate the CA. The Vault token given above must have `sudo` access
- to this backend, as well as permission to mount the backend at this path if
- it is not already mounted.
-
- - `intermediate_pki_path` ((#vault_ca_intermediate_pki))
- The path to use for the temporary intermediate CA pki backend in Vault. **Consul
- will overwrite any data at this path in order to generate a temporary intermediate
- CA**. The Vault token given above must have `write` access to this backend,
- as well as permission to mount the backend at this path if it is not already
- mounted.
-
- - `auth_method` ((#vault_ca_auth_method))
- Vault auth method to use for logging in to Vault.
- Please see [Vault Auth Methods](/vault/docs/auth) for more information
- on how to configure individual auth methods. If auth method is provided, Consul will obtain a
- new token from Vault when the token can no longer be renewed.
-
- - `type` The type of Vault auth method.
-
- - `mount_path` The mount path of the auth method.
- If not provided the auth method type will be used as the mount path.
-
- - `params` The parameters to configure the auth method.
- Please see [Vault Auth Methods](/vault/docs/auth) for information on how
- to configure the auth method you wish to use. If using the Kubernetes auth method, Consul will
- read the service account token from the default mount path `/var/run/secrets/kubernetes.io/serviceaccount/token`
- if the `jwt` parameter is not provided.
-
- #### Common CA Config Options
-
- There are also a number of common configuration options supported by all providers:
-
- - `csr_max_concurrent` ((#ca_csr_max_concurrent)) Sets a limit on the number
- of Certificate Signing Requests that can be processed concurrently. Defaults
- to 0 (disabled). This is useful when you want to limit the number of CPU cores
- available to the server for certificate signing operations. For example, on an
- 8 core server, setting this to 1 will ensure that no more than one CPU core
- will be consumed when generating or rotating certificates. Setting this is
- recommended **instead** of `csr_max_per_second` when you want to limit the
- number of cores consumed since it is simpler to reason about limiting CSR
- resources this way without artificially slowing down rotations. Added in 1.4.1.
-
- - `csr_max_per_second` ((#ca_csr_max_per_second)) Sets a rate limit
- on the maximum number of Certificate Signing Requests (CSRs) the servers will
- accept. This is used to prevent CA rotation from causing unbounded CPU usage
- on servers. It defaults to 50 which is conservative – a 2017 Macbook can process
- about 100 per second using only ~40% of one CPU core – but sufficient for deployments
- up to ~1500 service instances before the time it takes to rotate is impacted.
- For larger deployments we recommend increasing this based on the expected number
- of server instances and server resources, or use `csr_max_concurrent` instead
- if servers have more than one CPU core. Setting this to zero disables rate limiting.
- Added in 1.4.1.
-
- - `leaf_cert_ttl` ((#ca_leaf_cert_ttl)) Specifies the upper bound on the expiry
- of a leaf certificate issued for a service. In most cases a new leaf
- certificate will be requested by a proxy before this limit is reached. This
- is also the effective limit on how long a server outage can last (with no leader)
- before network connections will start being rejected. Defaults to `72h`.
-
- You can specify a range from one hour (minimum) up to one year (maximum) using
- the following units: `h`, `m`, `s`, `ms`, `us` (or `µs`), `ns`, or a combination
- of those units, e.g. `1h5m`.
-
- This value is also used when rotating out old root certificates from
- the cluster. When a root certificate has been inactive (rotated out)
- for more than twice the _current_ `leaf_cert_ttl`, it will be removed
- from the trusted list.
-
- - `intermediate_cert_ttl` ((#ca_intermediate_cert_ttl)) Specifies the expiry for the
- intermediate certificates. Defaults to `8760h` (1 year). Must be at least 3 times `leaf_cert_ttl`.
-
- - `root_cert_ttl` ((#ca_root_cert_ttl)) Specifies the expiry for a root certificate.
- Defaults to 10 years as `87600h`. This value, if provided, needs to be higher than the
- intermediate certificate TTL.
-
- This setting applies to all Consul CA providers.
-
- For the Vault provider, this value is only used if the backend is not initialized at first.
-
- This value is also applied on the `ca set-config` command.
-
- - `private_key_type` ((#ca_private_key_type)) The type of key to generate
- for this CA. This is only used when the provider is generating a new key. If
- `private_key` is set for the Consul provider, or existing root or intermediate
- PKI paths given for Vault then this will be ignored. Currently supported options
- are `ec` or `rsa`. Default is `ec`.
-
- It is required that all servers in a datacenter have
- the same config for the CA. It is recommended that servers in
- different datacenters use the same key type and size,
- although the built-in CA and Vault provider will both allow mixed CA
- key types.
-
- Some CA providers (currently Vault) will not allow cross-signing a
- new CA certificate with a different key type. This means that if you
- migrate from an RSA-keyed Vault CA to an EC-keyed CA from any
- provider, you may have to proceed without cross-signing which risks
- temporary connection issues for workloads during the new certificate
- rollout. We highly recommend testing this outside of production to
- understand the impact and suggest sticking to same key type where
- possible.
-
- Note that this only affects _CA_ keys generated by the provider.
- Leaf certificate keys are always EC 256 regardless of the CA
- configuration.
-
- - `private_key_bits` ((#ca_private_key_bits)) The length of key to
- generate for this CA. This is only used when the provider is generating a new
- key. If `private_key` is set for the Consul provider, or existing root or intermediate
- PKI paths given for Vault then this will be ignored.
-
- Currently supported values are:
-
- - `private_key_type = ec` (default): `224, 256, 384, 521`
- corresponding to the NIST P-\* curves of the same name.
- - `private_key_type = rsa`: `2048, 4096`
-
-- `locality` : Specifies a map of configurations that set the region and zone of the Consul agent. When specified on server agents, `locality` applies to all partitions on the server. When specified on clients, `locality` applies to all services registered to the client. Configure this field to enable Consul to route traffic to the nearest physical service instance. This field is intended for use primarily with VM and Nomad workloads. Refer to [Route traffic to local upstreams](/consul/docs/connect/manage-traffic/route-to-local-upstreams) for additional information.
- - `region`: String value that specifies the region where the Consul agent is running. Consul assigns this value to services registered to that agent. When service proxy regions match, Consul is able to prioritize routes between service instances in the same region over instances in other regions. You must specify values that are consistent with how regions are defined in your network, for example `us-west-1` for networks in AWS.
- - `zone`: String value that specifies the availability zone where the Consul agent is running. Consul assigns this value to services registered to that agent. When service proxy regions match, Consul is able to prioritize routes between service instances in the same region and zone over instances in other regions and zones. When healthy service instances are available in multiple zones within the most-local region, Consul prioritizes instances that also match the downstream proxy's `zone`. You must specify values that are consistent with how zones are defined in your network, for example `us-west-1a` for networks in AWS.
-
-## DNS and Domain Parameters
-
-- `dns_config` This object allows a number of sub-keys
- to be set which can tune how DNS queries are serviced. Refer to [DNS caching](/consul/docs/services/discovery/dns-cache) for more information.
-
- The following sub-keys are available:
-
- - `allow_stale` - Enables a stale query for DNS information.
- This allows any Consul server, rather than only the leader, to service the request.
- The advantage of this is you get linear read scalability with Consul servers.
- In versions of Consul prior to 0.7, this defaulted to false, meaning all requests
- are serviced by the leader, providing stronger consistency but less throughput
- and higher latency. In Consul 0.7 and later, this defaults to true for better
- utilization of available servers.
-
- - `max_stale` - When [`allow_stale`](#allow_stale) is
- specified, this is used to limit how stale results are allowed to be. If a Consul
- server is behind the leader by more than `max_stale`, the query will be re-evaluated
- on the leader to get more up-to-date results. Prior to Consul 0.7.1 this defaulted
- to 5 seconds; in Consul 0.7.1 and later this defaults to 10 years ("87600h")
- which effectively allows DNS queries to be answered by any server, no matter
- how stale. In practice, servers are usually only milliseconds behind the leader,
- so this lets Consul continue serving requests in long outage scenarios where
- no leader can be elected.
-
- - `node_ttl` - By default, this is "0s", so all node lookups
- are served with a 0 TTL value. DNS caching for node lookups can be enabled by
- setting this value. This should be specified with the "s" suffix for second or
- "m" for minute.
-
- - `service_ttl` - This is a sub-object which allows
- for setting a TTL on service lookups with a per-service policy. The "\*" wildcard
- service can be used when there is no specific policy available for a service.
- By default, all services are served with a 0 TTL value. DNS caching for service
- lookups can be enabled by setting this value.
-
- - `enable_truncate` - If set to true, a UDP DNS
- query that would return more than 3 records, or more than would fit into a valid
- UDP response, will set the truncated flag, indicating to clients that they should
- re-query using TCP to get the full set of records.
-
- - `only_passing` - If set to true, any nodes whose
- health checks are warning or critical will be excluded from DNS results. If false,
- the default, only nodes whose health checks are failing as critical will be excluded.
- For service lookups, the health checks of the node itself, as well as the service-specific
- checks are considered. For example, if a node has a health check that is critical
- then all services on that node will be excluded because they are also considered
- critical.
-
- - `recursor_strategy` - If set to `sequential`, Consul will query recursors in the
- order listed in the [`recursors`](#recursors) option. If set to `random`,
-    Consul will query upstream DNS resolvers in a random order. Defaults to
- `sequential`.
-
- - `recursor_timeout` - Timeout used by Consul when
- recursively querying an upstream DNS server. See [`recursors`](#recursors) for more details. Default is 2s. This is available in Consul 0.7 and later.
-
- - `disable_compression` - If set to true, DNS
- responses will not be compressed. Compression was added and enabled by default
- in Consul 0.7.
-
- - `udp_answer_limit` - Limit the number of resource
- records contained in the answer section of a UDP-based DNS response. This parameter
- applies only to UDP DNS queries that are less than 512 bytes. This setting is
- deprecated and replaced in Consul 1.0.7 by [`a_record_limit`](#a_record_limit).
-
- - `a_record_limit` - Limit the number of resource
-    records contained in the answer section of an A, AAAA or ANY DNS response (both
- TCP and UDP). When answering a question, Consul will use the complete list of
- matching hosts, shuffle the list randomly, and then limit the number of answers
- to `a_record_limit` (default: no limit). This limit does not apply to SRV records.
-
- In environments where [RFC 3484 Section 6](https://tools.ietf.org/html/rfc3484#section-6) Rule 9
- is implemented and enforced (i.e. DNS answers are always sorted and
- therefore never random), clients may need to set this value to `1` to
- preserve the expected randomized distribution behavior (note:
- [RFC 3484](https://tools.ietf.org/html/rfc3484) has been obsoleted by
- [RFC 6724](https://tools.ietf.org/html/rfc6724) and as a result it should
- be increasingly uncommon to need to change this value with modern
- resolvers).
-
- - `enable_additional_node_meta_txt` - When set to true, Consul
- will add TXT records for Node metadata into the Additional section of the DNS responses for several query types such as SRV queries. When set to false those records are not emitted. This does not impact the behavior of those same TXT records when they would be added to the Answer section of the response like when querying with type TXT or ANY. This defaults to true.
-
-  - `soa` Allows tuning of the settings in the SOA record. Unspecified
-    values fall back to their default values; all values are integers expressed
-    as seconds.
-
- The following settings are available:
-
- - `expire` ((#soa_expire)) - Configure SOA Expire duration in seconds,
- default value is 86400, ie: 24 hours.
-
- - `min_ttl` ((#soa_min_ttl)) - Configure SOA DNS minimum TTL. As explained
- in [RFC-2308](https://tools.ietf.org/html/rfc2308) this also controls negative
- cache TTL in most implementations. Default value is 0, ie: no minimum delay
- or negative TTL.
-
- - `refresh` ((#soa_refresh)) - Configure SOA Refresh duration in seconds,
- default value is `3600`, ie: 1 hour.
-
- - `retry` ((#soa_retry)) - Configures the Retry duration expressed
- in seconds, default value is 600, ie: 10 minutes.
-
- - `use_cache` ((#dns_use_cache)) - When set to true, DNS resolution will
- use the agent cache described in [agent caching](/consul/api-docs/features/caching).
- This setting affects all service and prepared queries DNS requests. Implies [`allow_stale`](#allow_stale)
-
- - `cache_max_age` ((#dns_cache_max_age)) - When [use_cache](#dns_use_cache)
- is enabled, the agent will attempt to re-fetch the result from the servers if
- the cached value is older than this duration. See: [agent caching](/consul/api-docs/features/caching).
-
- **Note** that unlike the `max-age` HTTP header, a value of 0 for this field is
- equivalent to "no max age". To get a fresh value from the cache use a very small value
- of `1ns` instead of 0.
-
- - `prefer_namespace` ((#dns_prefer_namespace)) **Deprecated in Consul 1.11.
- Use the [canonical DNS format for enterprise service lookups](/consul/docs/services/discovery/dns-static-lookups#service-lookups-for-consul-enterprise) instead.** -
- When set to `true`, in a DNS query for a service, a single label between the domain
- and the `service` label is treated as a namespace name instead of a datacenter.
- When set to `false`, the default, the behavior is the same as non-Enterprise
- versions and treats the single label as the datacenter.
-
-- `domain` Equivalent to the [`-domain` command-line flag](/consul/docs/agent/config/cli-flags#_domain).
-
-## Encryption Parameters
-
-- `auto_encrypt` This object allows setting options for the `auto_encrypt` feature.
-
- The following sub-keys are available:
-
- - `allow_tls` (Defaults to `false`) This option enables
- `auto_encrypt` on the servers and allows them to automatically distribute certificates
- from the service mesh CA to the clients. If enabled, the server can accept incoming
- connections from both the built-in CA and the service mesh CA, as well as their certificates.
- Note, the server will only present the built-in CA and certificate, which the
- client can verify using the CA it received from `auto_encrypt` endpoint. If disabled,
- a client configured with `auto_encrypt.tls` will be unable to start.
-
- - `tls` (Defaults to `false`) Allows the client to request the
- service mesh CA and certificates from the servers, for encrypting RPC communication.
- The client will make the request to any servers listed in the `-retry-join`
-    option. This requires every server to have `auto_encrypt.allow_tls` enabled.
- When both `auto_encrypt` options are used, it allows clients to receive certificates
- that are generated on the servers. If the `-server-port` is not the default one,
- it has to be provided to the client as well. Usually this is discovered through
- LAN gossip, but `auto_encrypt` provision happens before the information can be
- distributed through gossip. The most secure `auto_encrypt` setup is when the
- client is provided with the built-in CA, `verify_server_hostname` is turned on,
-    and when an ACL token with `node.write` permissions is set up. It is also possible
-    to use `auto_encrypt` with a CA and ACL, but without `verify_server_hostname`,
-    or only with an ACL enabled, or only with CA and `verify_server_hostname`, or
- only with a CA, or finally without a CA and without ACL enabled. In any case,
- the communication to the `auto_encrypt` endpoint is always TLS encrypted.
-
- ~> **Warning:** Enabling `auto_encrypt.tls` conflicts with the [`auto_config`](#auto_config) feature.
- Only one option may be specified.
-
- - `dns_san` (Defaults to `[]`) When this option is being
- used, the certificates requested by `auto_encrypt` from the server have these
- `dns_san` set as DNS SAN.
-
- - `ip_san` (Defaults to `[]`) When this option is being used,
- the certificates requested by `auto_encrypt` from the server have these `ip_san`
- set as IP SAN.
-
-- `encrypt` Equivalent to the [`-encrypt` command-line flag](/consul/docs/agent/config/cli-flags#_encrypt).
-
-- `encrypt_verify_incoming` - This is an optional
- parameter that can be used to disable enforcing encryption for incoming gossip
- in order to upshift from unencrypted to encrypted gossip on a running cluster.
- See [this section](/consul/docs/security/encryption#configuring-gossip-encryption-on-an-existing-cluster)
- for more information. Defaults to true.
-
-- `encrypt_verify_outgoing` - This is an optional
- parameter that can be used to disable enforcing encryption for outgoing gossip
- in order to upshift from unencrypted to encrypted gossip on a running cluster.
- See [this section](/consul/docs/security/encryption#configuring-gossip-encryption-on-an-existing-cluster)
- for more information. Defaults to true.
-
-## Gossip Parameters
-
-- `gossip_lan` - **(Advanced)** This object contains a
- number of sub-keys which can be set to tune the LAN gossip communications. These
- are only provided for users running especially large clusters that need fine tuning
- and are prepared to spend significant effort correctly tuning them for their environment
- and workload. **Tuning these improperly can cause Consul to fail in unexpected
- ways**. The default values are appropriate in almost all deployments.
-
- - `gossip_nodes` - The number of random nodes to send
- gossip messages to per gossip_interval. Increasing this number causes the gossip
- messages to propagate across the cluster more quickly at the expense of increased
- bandwidth. The default is 3.
-
- - `gossip_interval` - The interval between sending
- messages that need to be gossiped that haven't been able to piggyback on probing
- messages. If this is set to zero, non-piggyback gossip is disabled. By lowering
- this value (more frequent) gossip messages are propagated across the cluster
- more quickly at the expense of increased bandwidth. The default is 200ms.
-
- - `probe_interval` - The interval between random
- node probes. Setting this lower (more frequent) will cause the cluster to detect
- failed nodes more quickly at the expense of increased bandwidth usage. The default
- is 1s.
-
- - `probe_timeout` - The timeout to wait for an ack
- from a probed node before assuming it is unhealthy. This should be at least the
- 99-percentile of RTT (round-trip time) on your network. The default is 500ms
- and is a conservative value suitable for almost all realistic deployments.
-
- - `retransmit_mult` - The multiplier for the number
- of retransmissions that are attempted for messages broadcasted over gossip. The
- number of retransmits is scaled using this multiplier and the cluster size. The
- higher the multiplier, the more likely a failed broadcast is to converge at the
- expense of increased bandwidth. The default is 4.
-
- - `suspicion_mult` - The multiplier for determining
- the time an inaccessible node is considered suspect before declaring it dead.
- The timeout is scaled with the cluster size and the probe_interval. This allows
- the timeout to scale properly with expected propagation delay with a larger cluster
- size. The higher the multiplier, the longer an inaccessible node is considered
- part of the cluster before declaring it dead, giving that suspect node more time
- to refute if it is indeed still alive. The default is 4.
-
-- `gossip_wan` - **(Advanced)** This object contains a
- number of sub-keys which can be set to tune the WAN gossip communications. These
- are only provided for users running especially large clusters that need fine tuning
- and are prepared to spend significant effort correctly tuning them for their environment
- and workload. **Tuning these improperly can cause Consul to fail in unexpected
- ways**. The default values are appropriate in almost all deployments.
-
- - `gossip_nodes` - The number of random nodes to send
- gossip messages to per gossip_interval. Increasing this number causes the gossip
- messages to propagate across the cluster more quickly at the expense of increased
- bandwidth. The default is 4.
-
- - `gossip_interval` - The interval between sending
- messages that need to be gossiped that haven't been able to piggyback on probing
- messages. If this is set to zero, non-piggyback gossip is disabled. By lowering
- this value (more frequent) gossip messages are propagated across the cluster
- more quickly at the expense of increased bandwidth. The default is 500ms.
-
- - `probe_interval` - The interval between random
- node probes. Setting this lower (more frequent) will cause the cluster to detect
- failed nodes more quickly at the expense of increased bandwidth usage. The default
- is 5s.
-
- - `probe_timeout` - The timeout to wait for an ack
- from a probed node before assuming it is unhealthy. This should be at least the
- 99-percentile of RTT (round-trip time) on your network. The default is 3s
- and is a conservative value suitable for almost all realistic deployments.
-
- - `retransmit_mult` - The multiplier for the number
- of retransmissions that are attempted for messages broadcasted over gossip. The
- number of retransmits is scaled using this multiplier and the cluster size. The
- higher the multiplier, the more likely a failed broadcast is to converge at the
- expense of increased bandwidth. The default is 4.
-
- - `suspicion_mult` - The multiplier for determining
- the time an inaccessible node is considered suspect before declaring it dead.
- The timeout is scaled with the cluster size and the probe_interval. This allows
- the timeout to scale properly with expected propagation delay with a larger cluster
- size. The higher the multiplier, the longer an inaccessible node is considered
- part of the cluster before declaring it dead, giving that suspect node more time
- to refute if it is indeed still alive. The default is 6.
-
-## Join Parameters
-
-- `rejoin_after_leave` Equivalent to the [`-rejoin` command-line flag](/consul/docs/agent/config/cli-flags#_rejoin).
-
-- `retry_join` - Equivalent to the [`-retry-join`](/consul/docs/agent/config/cli-flags#retry-join) command-line flag.
-
-- `retry_interval` Equivalent to the [`-retry-interval` command-line flag](/consul/docs/agent/config/cli-flags#_retry_interval).
-
-- `retry_max` - Equivalent to the [`-retry-max`](/consul/docs/agent/config/cli-flags#_retry_max) command-line flag.
-
-- `retry_join_wan` Equivalent to the [`-retry-join-wan` command-line flag](/consul/docs/agent/config/cli-flags#_retry_join_wan). Takes a list of addresses to attempt joining to WAN every [`retry_interval_wan`](#_retry_interval_wan) until at least one join works.
-
-- `retry_interval_wan` Equivalent to the [`-retry-interval-wan` command-line flag](/consul/docs/agent/config/cli-flags#_retry_interval_wan).
-
-- `start_join` **Deprecated in Consul 1.15. Use the [`retry_join`](/consul/docs/agent/config/config-files#retry_join) field instead. This field will be removed in a future version of Consul.**
- This field is an alias of `retry_join`.
-
-- `start_join_wan` **Deprecated in Consul 1.15. Use the [`retry_join_wan`](/consul/docs/agent/config/config-files#retry_join_wan) field instead. This field will be removed in a future version of Consul.**
- This field is an alias of `retry_join_wan`.
-
-## Log Parameters
-
-- `log_file` Equivalent to the [`-log-file` command-line flag](/consul/docs/agent/config/cli-flags#_log_file).
-
-- `log_rotate_duration` Equivalent to the [`-log-rotate-duration` command-line flag](/consul/docs/agent/config/cli-flags#_log_rotate_duration).
-
-- `log_rotate_bytes` Equivalent to the [`-log-rotate-bytes` command-line flag](/consul/docs/agent/config/cli-flags#_log_rotate_bytes).
-
-- `log_rotate_max_files` Equivalent to the [`-log-rotate-max-files` command-line flag](/consul/docs/agent/config/cli-flags#_log_rotate_max_files).
-
-- `log_level` Equivalent to the [`-log-level` command-line flag](/consul/docs/agent/config/cli-flags#_log_level).
-
-- `log_json` Equivalent to the [`-log-json` command-line flag](/consul/docs/agent/config/cli-flags#_log_json).
-
-- `enable_syslog` Equivalent to the [`-syslog` command-line flag](/consul/docs/agent/config/cli-flags#_syslog).
-
-- `syslog_facility` When [`enable_syslog`](#enable_syslog)
- is provided, this controls to which facility messages are sent. By default, `LOCAL0`
- will be used.
-
-## Node Parameters
-
-- `node_id` Equivalent to the [`-node-id` command-line flag](/consul/docs/agent/config/cli-flags#_node_id).
-
-- `node_name` Equivalent to the [`-node` command-line flag](/consul/docs/agent/config/cli-flags#_node).
-
-- `node_meta` Available in Consul 0.7.3 and later. This object allows associating arbitrary metadata key/value pairs with the local node, which can then be used for filtering results from certain catalog endpoints. See the [`-node-meta` command-line flag](/consul/docs/agent/config/cli-flags#_node_meta) for more information.
-
-
-
- ```hcl
- node_meta {
- instance_type = "t2.medium"
- }
- ```
-
- ```json
- {
- "node_meta": {
- "instance_type": "t2.medium"
- }
- }
- ```
-
-
-
-- `disable_host_node_id` Equivalent to the [`-disable-host-node-id` command-line flag](/consul/docs/agent/config/cli-flags#_disable_host_node_id).
-
-## Raft Parameters
-
-- `raft_boltdb` ((#raft_boltdb)) **These fields are deprecated in Consul v1.15.0.
- Use [`raft_logstore`](#raft_logstore) instead.** This is a nested
- object that allows configuring options for Raft's BoltDB-based log store.
-
- - `NoFreelistSync` **This field is deprecated in Consul v1.15.0. Use the
- [`raft_logstore.boltdb.no_freelist_sync`](#raft_logstore_boltdb_no_freelist_sync) field
- instead.** Setting this to `true` disables syncing the BoltDB freelist
- to disk within the raft.db file. Not syncing the freelist to disk
- reduces disk IO required for write operations at the expense of potentially
- increasing start up time due to needing to scan the db to discover where the
- free space resides within the file.
-
-- `raft_logstore` ((#raft_logstore)) This is a nested object that allows
- configuring options for Raft's LogStore component which is used to persist
- logs and crucial Raft state on disk during writes. This was added in Consul
- v1.15.0.
-
- - `backend` ((#raft_logstore_backend)) Specifies which storage
- engine to use to persist logs. Valid options are `boltdb` or `wal`. Default
- is `boltdb`. The `wal` option specifies an experimental backend that
- should be used with caution. Refer to
- [Experimental WAL LogStore backend](/consul/docs/agent/wal-logstore)
- for more information.
-
- - `disable_log_cache` ((#raft_logstore_disable_log_cache)) Disables the in-memory cache for recent logs. We recommend using it for performance testing purposes, as no significant improvement has been measured when the cache is disabled. While the in-memory log cache theoretically prevents disk reads for recent logs, recent logs are also stored in the OS page cache, which does not slow either the `boltdb` or `wal` backend's ability to read them.
-
- - `verification` ((#raft_logstore_verification)) This is a nested object that
- allows configuring the online verification of the LogStore. Verification
- provides additional assurances that LogStore backends are correctly storing
- data. It imposes low overhead on servers and is safe to run in
- production. It is most useful when evaluating a new backend
- implementation.
-
- Verification must be enabled on the leader to have any effect and can be
- used with any backend. When enabled, the leader periodically writes a
- special "checkpoint" log message that includes the checksums of all log entries
- written to Raft since the last checkpoint. Followers that have verification
- enabled run a background task for each checkpoint that reads all logs
- directly from the LogStore and then recomputes the checksum. A report is output
- as an INFO level log for each checkpoint.
-
- Checksum failure should never happen and indicate unrecoverable corruption
- on that server. The only correct response is to stop the server, remove its
- data directory, and restart so it can be caught back up with a correct
- server again. Please report verification failures including details about
- your hardware and workload via GitHub issues. Refer to
- [Experimental WAL LogStore backend](/consul/docs/agent/wal-logstore)
- for more information.
-
- - `enabled` ((#raft_logstore_verification_enabled)) - Set to `true` to
- allow this Consul server to write and verify log verification checkpoints
- when elected leader.
-
- - `interval` ((#raft_logstore_verification_interval)) - Specifies the time
- interval between checkpoints. There is no default value. You must
- configure the `interval` and set [`enabled`](#raft_logstore_verification_enabled)
- to `true` to correctly enable intervals. We recommend using an interval
- between `30s` and `5m`. The performance overhead is insignificant when the
- interval is set to `5m` or less.
-
- - `boltdb` ((#raft_logstore_boltdb)) - Object that configures options for
- Raft's `boltdb` backend. It has no effect if the `backend` is not `boltdb`.
-
- - `no_freelist_sync` ((#raft_logstore_boltdb_no_freelist_sync)) - Set to
- `true` to disable storing BoltDB's freelist to disk within the
- `raft.db` file. Disabling freelist syncs reduces the disk IO required
- for write operations, but could potentially increase start up time
- because Consul must scan the database to find free space
- within the file.
-
- - `wal` ((#raft_logstore_wal)) - Object that configures the `wal` backend.
- Refer to [Experimental WAL LogStore backend](/consul/docs/agent/wal-logstore)
- for more information.
-
- - `segment_size_mb` ((#raft_logstore_wal_segment_size_mb)) - Integer value
- that represents the target size in MB for each segment file before
- rolling to a new segment. The default value is `64` and is suitable for
- most deployments. While a smaller value may use less disk space because you
- can reclaim space by deleting old segments sooner, the smaller segment that results
- may affect performance because safely rotating to a new file more
- frequently can impact tail latencies. Larger values are unlikely
- to improve performance significantly. We recommend using this
- configuration for performance testing purposes.
-
-- `raft_protocol` ((#raft_protocol)) Equivalent to the [`-raft-protocol`
- command-line flag](/consul/docs/agent/config/cli-flags#_raft_protocol).
-
-- `raft_snapshot_threshold` ((#\_raft_snapshot_threshold)) This controls the
- minimum number of raft commit entries between snapshots that are saved to
- disk. This is a low-level parameter that should rarely need to be changed.
- Very busy clusters experiencing excessive disk IO may increase this value to
- reduce disk IO, and minimize the chances of all servers taking snapshots at
- the same time. Increasing this trades off disk IO for disk space since the log
- will grow much larger and the space in the raft.db file can't be reclaimed
- till the next snapshot. Servers may take longer to recover from crashes or
- failover if this is increased significantly as more logs will need to be
- replayed. In Consul 1.1.0 and later this defaults to 16384, and in prior
- versions it was set to 8192.
-
- Since Consul 1.10.0 this can be reloaded using `consul reload` or sending the
- server a `SIGHUP` to allow tuning snapshot activity without a rolling restart
- in emergencies.
-
-- `raft_snapshot_interval` ((#\_raft_snapshot_interval)) This controls how often
- servers check if they need to save a snapshot to disk. This is a low-level
- parameter that should rarely need to be changed. Very busy clusters
- experiencing excessive disk IO may increase this value to reduce disk IO, and
- minimize the chances of all servers taking snapshots at the same time.
- Increasing this trades off disk IO for disk space since the log will grow much
- larger and the space in the raft.db file can't be reclaimed till the next
- snapshot. Servers may take longer to recover from crashes or failover if this
- is increased significantly as more logs will need to be replayed. In Consul
- 1.1.0 and later this defaults to `30s`, and in prior versions it was set to
- `5s`.
-
- Since Consul 1.10.0 this can be reloaded using `consul reload` or sending the
- server a `SIGHUP` to allow tuning snapshot activity without a rolling restart
- in emergencies.
-
-- `raft_trailing_logs` - This controls how many log entries are left in the log
- store on disk after a snapshot is made. This should only be adjusted when
- followers cannot catch up to the leader due to a very large snapshot size
- and high write throughput causing log truncation before a snapshot can be
- fully installed on a follower. If you need to use this to recover a cluster,
- consider reducing write throughput or the amount of data stored on Consul as
- it is likely under a load it is not designed to handle. The default value is
- 10000 which is suitable for all normal workloads. Added in Consul 1.5.3.
-
- Since Consul 1.10.0 this can be reloaded using `consul reload` or sending the
- server a `SIGHUP` to allow recovery without downtime when followers can't keep
- up.
-
-## Serf Parameters
-
-- `serf_lan` ((#serf_lan_bind)) Equivalent to the [`-serf-lan-bind` command-line flag](/consul/docs/agent/config/cli-flags#_serf_lan_bind).
- This is an IP address, not to be confused with [`ports.serf_lan`](#serf_lan_port).
-
-- `serf_lan_allowed_cidrs` ((#serf_lan_allowed_cidrs)) Equivalent to the [`-serf-lan-allowed-cidrs` command-line flag](/consul/docs/agent/config/cli-flags#_serf_lan_allowed_cidrs).
-
-- `serf_wan` ((#serf_wan_bind)) Equivalent to the [`-serf-wan-bind` command-line flag](/consul/docs/agent/config/cli-flags#_serf_wan_bind).
-
-- `serf_wan_allowed_cidrs` ((#serf_wan_allowed_cidrs)) Equivalent to the [`-serf-wan-allowed-cidrs` command-line flag](/consul/docs/agent/config/cli-flags#_serf_wan_allowed_cidrs).
-
-## Telemetry Parameters
-
-- `telemetry` This is a nested object that configures where
- Consul sends its runtime telemetry, and contains the following keys:
-
- - `circonus_api_token` ((#telemetry-circonus_api_token)) A valid API
- Token used to create/manage check. If provided, metric management is
- enabled.
-
- - `circonus_api_app` ((#telemetry-circonus_api_app)) A valid app name
- associated with the API token. By default, this is set to "consul".
-
- - `circonus_api_url` ((#telemetry-circonus_api_url))
- The base URL to use for contacting the Circonus API. By default, this is set
- to "https://api.circonus.com/v2".
-
- - `circonus_submission_interval` ((#telemetry-circonus_submission_interval)) The interval at which metrics are submitted to Circonus. By default, this is set to "10s" (ten seconds).
-
- - `circonus_submission_url` ((#telemetry-circonus_submission_url))
- The `check.config.submission_url` field, of a Check API object, from a previously
- created HTTPTrap check.
-
- - `circonus_check_id` ((#telemetry-circonus_check_id))
- The Check ID (not **check bundle**) from a previously created HTTPTrap check.
- The numeric portion of the `check._cid` field in the Check API object.
-
- - `circonus_check_force_metric_activation` ((#telemetry-circonus_check_force_metric_activation)) Force activation of metrics which already exist and are not currently active.
- If check management is enabled, the default behavior is to add new metrics as
- they are encountered. If the metric already exists in the check, it will **not**
- be activated. This setting overrides that behavior. By default, this is set to
- false.
-
- - `circonus_check_instance_id` ((#telemetry-circonus_check_instance_id)) Uniquely identifies the metrics coming from this **instance**. It can be used to
- maintain metric continuity with transient or ephemeral instances as they move
- around within an infrastructure. By default, this is set to hostname:application
- name (e.g. "host123:consul").
-
- - `circonus_check_search_tag` ((#telemetry-circonus_check_search_tag)) A special tag which, when coupled with the instance id, helps to narrow down
- the search results when neither a Submission URL or Check ID is provided. By
- default, this is set to service:application name (e.g. "service:consul").
-
- - `circonus_check_display_name` ((#telemetry-circonus_check_display_name)) Specifies a name to give a check when it is created. This name is displayed in
- the Circonus UI Checks list. Available in Consul 0.7.2 and later.
-
- - `circonus_check_tags` ((#telemetry-circonus_check_tags))
- Comma separated list of additional tags to add to a check when it is created.
- Available in Consul 0.7.2 and later.
-
- - `circonus_broker_id` ((#telemetry-circonus_broker_id))
- The ID of a specific Circonus Broker to use when creating a new check. The numeric
- portion of `broker._cid` field in a Broker API object. If metric management is
- enabled and neither a Submission URL nor Check ID is provided, an attempt will
- be made to search for an existing check using Instance ID and Search Tag. If
- one is not found, a new HTTPTrap check will be created. By default, this is not
- used and a random Enterprise Broker is selected, or the default Circonus Public
- Broker.
-
- - `circonus_broker_select_tag` ((#telemetry-circonus_broker_select_tag)) A special tag which will be used to select a Circonus Broker when a Broker ID
- is not provided. The best use of this is as a hint for which broker should
- be used based on **where** this particular instance is running (e.g. a specific
- geo location or datacenter, dc:sfo). By default, this is left blank and not used.
-
- - `disable_hostname` ((#telemetry-disable_hostname))
- Set to `true` to stop prepending the machine's hostname to gauge-type metrics. Default is `false`.
-
- - `disable_per_tenancy_usage_metrics` ((#telemetry-disable_per_tenancy_usage_metrics))
- Set to `true` to exclude tenancy labels from usage metrics. This significantly decreases CPU utilization in clusters with many admin partitions or namespaces.
-
- - `dogstatsd_addr` ((#telemetry-dogstatsd_addr)) This provides the address
- of a DogStatsD instance in the format `host:port`. DogStatsD is a protocol-compatible
- flavor of statsd, with the added ability to decorate metrics with tags and event
- information. If provided, Consul will send various telemetry information to that
- instance for aggregation. This can be used to capture runtime information.
-
- - `dogstatsd_tags` ((#telemetry-dogstatsd_tags)) This provides a list
- of global tags that will be added to all telemetry packets sent to DogStatsD.
- It is a list of strings, where each string looks like "my_tag_name:my_tag_value".
-
- - `enable_host_metrics` ((#telemetry-enable_host_metrics))
- This enables reporting of host metrics about system resources, defaults to false.
-
- - `filter_default` ((#telemetry-filter_default))
- This controls whether to allow metrics that have not been specified by the filter.
- Defaults to `true`, which will allow all metrics when no filters are provided.
- When set to `false` with no filters, no metrics will be sent.
-
- - `metrics_prefix` ((#telemetry-metrics_prefix))
- The prefix used while writing all telemetry data. By default, this is set to
- "consul". This was added in Consul 1.0. For previous versions of Consul, use
- the config option `statsite_prefix` in this same structure. This was renamed
- in Consul 1.0 since this prefix applied to all telemetry providers, not just
- statsite.
-
- - `prefix_filter` ((#telemetry-prefix_filter))
- This is a list of filter rules to apply for allowing/blocking metrics by
- prefix in the following format:
-
-
-
- ```hcl
- telemetry {
- prefix_filter = ["+consul.raft.apply", "-consul.http", "+consul.http.GET"]
- }
- ```
-
- ```json
- {
- "telemetry": {
- "prefix_filter": [
- "+consul.raft.apply",
- "-consul.http",
- "+consul.http.GET"
- ]
- }
- }
- ```
-
-
-
- A leading "**+**" will enable any metrics with the given prefix, and a leading "**-**" will block them. If there is overlap between two rules, the more specific rule will take precedence. Blocking will take priority if the same prefix is listed multiple times.
-
- - `prometheus_retention_time` ((#telemetry-prometheus_retention_time)) If the value is greater than `0s` (the default), this enables [Prometheus](https://prometheus.io/)
- export of metrics. The duration can be expressed using the duration semantics
- and will aggregate all counters for the duration specified (it might have an
- impact on Consul's memory usage). A good value for this parameter is at least
- 2 times the interval of scrape of Prometheus, but you might also put a very high
- retention time such as a few days (for instance 744h to enable retention to 31
- days). Fetching the metrics using prometheus can then be performed using the
- [`/v1/agent/metrics?format=prometheus`](/consul/api-docs/agent#view-metrics) endpoint.
- The format is compatible natively with prometheus. When running in this mode,
- it is recommended to also enable the option [`disable_hostname`](#telemetry-disable_hostname)
- to avoid having prefixed metrics with hostname. Consul does not use the default
- Prometheus path, so Prometheus must be configured as follows. Note that using
- `?format=prometheus` in the path won't work as `?` will be escaped, so it must be
- specified as a parameter.
-
-
-
- ```yaml
- metrics_path: '/v1/agent/metrics'
- params:
- format: ['prometheus']
- ```
-
-
-
- - `statsd_address` ((#telemetry-statsd_address)) This provides the address
- of a statsd instance in the format `host:port`. If provided, Consul will send
- various telemetry information to that instance for aggregation. This can be used
- to capture runtime information. This sends UDP packets only and can be used with
- statsd or statsite.
-
- - `statsite_address` ((#telemetry-statsite_address)) This provides the
- address of a statsite instance in the format `host:port`. If provided, Consul
- will stream various telemetry information to that instance for aggregation. This
- can be used to capture runtime information. This streams via TCP and can only
- be used with statsite.
-
-## UI Parameters
-
-- `ui` - **This field is deprecated in Consul 1.9.0. See the [`ui_config.enabled`](#ui_config_enabled) field instead.**
- Equivalent to the [`-ui`](/consul/docs/agent/config/cli-flags#_ui) command-line flag.
-
-- `ui_config` - This object allows a number of sub-keys to be set which controls
- the display or features available in the UI. Configuring the UI with this
- stanza was added in Consul 1.9.0.
-
- The following sub-keys are available:
-
- - `enabled` ((#ui_config_enabled)) - This enables the service of the web UI
- from this agent. Boolean value, defaults to false. In `-dev` mode this
- defaults to true. Replaces `ui` from before 1.9.0. Equivalent to the
- [`-ui`](/consul/docs/agent/config/cli-flags#_ui) command-line flag.
-
- - `dir` ((#ui_config_dir)) - This specifies that the web UI should be served
- from an external dir rather than the built-in one. This allows for
- customization or development. Replaces `ui_dir` from before 1.9.0.
- Equivalent to the [`-ui-dir`](/consul/docs/agent/config/cli-flags#_ui_dir) command-line flag.
-
- - `content_path` ((#ui_config_content_path)) - This specifies the HTTP path
- that the web UI should be served from. Defaults to `/ui/`. Equivalent to the
- [`-ui-content-path`](/consul/docs/agent/config/cli-flags#_ui_content_path) flag.
-
- - `metrics_provider` ((#ui_config_metrics_provider)) - Specifies a named
- metrics provider implementation the UI should use to fetch service metrics.
- By default metrics are disabled. Consul 1.9.0 includes a built-in provider
- named `prometheus` that can be enabled explicitly here. It also requires the
- `metrics_proxy` to be configured below and direct queries to a Prometheus
- instance that has Envoy metrics for all services in the datacenter.
-
- - `metrics_provider_files` ((#ui_config_metrics_provider_files)) - An optional array
- of absolute paths to javascript files on the Agent's disk which will be
- served as part of the UI. These files should contain metrics provider
- implementations and registration enabling UI metric queries to be customized
- or implemented for an alternative time-series backend.
-
- ~> **Security Note:** These javascript files are included in the UI with no
- further validation or sand-boxing. By configuring them here the operator is
- fully trusting anyone able to write to them as well as the original authors
- not to include malicious code in the UI being served.
-
- - `metrics_provider_options_json` ((#ui_config_metrics_provider_options_json)) -
- This is an optional raw JSON object as a string which is passed to the
- provider implementation's `init` method at startup to allow arbitrary
- configuration to be passed through.
-
- - `metrics_proxy` ((#ui_config_metrics_proxy)) - This object configures an
- internal agent API endpoint that will proxy GET requests to a metrics
- backend to allow querying metrics data in the UI. This simplifies deployment
- where the metrics backend is not exposed externally to UI users' browsers.
- It may also be used to augment requests with API credentials to allow
- serving graphs to UI users without them needing individual access tokens for
- the metrics backend.
-
- ~> **Security Note:** Exposing your metrics backend via Consul in this way
- should be carefully considered in production. As Consul doesn't understand
- the requests, it can't limit access to only specific resources. For example
- **this might make it possible for a malicious user on the network to query
- for arbitrary metrics about any server or workload in your infrastructure,
- or overload the metrics infrastructure with queries**. See [Metrics Proxy
- Security](/consul/docs/connect/observability/ui-visualization#metrics-proxy-security)
- for more details.
-
- The following sub-keys are available:
-
- - `base_url` ((#ui_config_metrics_provider_base_url)) - This is required to
- enable the proxy. It should be set to the base URL that the Consul agent
- should proxy requests for metrics to. For example a value of
- `http://prometheus-server` would target a Prometheus instance with local
- DNS name "prometheus-server" on port 80. This may include a path prefix
- which will then not be necessary in provider requests to the backend and
- the proxy will prevent any access to paths without that prefix on the
- backend.
-
- - `path_allowlist` ((#ui_config_metrics_provider_path_allowlist)) - This
- specifies the paths that may be proxied to when appended to the
- `base_url`. It defaults to `["/api/v1/query_range", "/api/v1/query"]`
- which are the endpoints required for the built-in Prometheus provider. If
- a [custom
- provider](/consul/docs/connect/observability/ui-visualization#custom-metrics-providers)
- is used that requires the metrics proxy, the correct allowlist must be
- specified to enable proxying to necessary endpoints. See [Path
- Allowlist](/consul/docs/connect/observability/ui-visualization#path-allowlist)
- for more information.
-
- - `add_headers` ((#ui_config_metrics_proxy_add_headers)) - This is an
- optional list of headers to add to requests that are proxied to the
- metrics backend. It may be used to inject Authorization tokens within the
- agent without exposing those to UI users.
-
- Each item in the list is an object with the following keys:
-
- - `name` ((#ui_config_metrics_proxy_add_headers_name)) - Specifies the
- HTTP header name to inject into proxied requests.
-
- - `value` ((#ui_config_metrics_proxy_add_headers_value)) - Specifies the
- value to inject into proxied requests.
-
- - `dashboard_url_templates` ((#ui_config_dashboard_url_templates)) - This map
- specifies URL templates that may be used to render links to external
- dashboards in various contexts in the UI. It is a map with the name of the
- template as a key. The value is a string URL with optional placeholders.
-
- Each template may contain placeholders which will be substituted for the
- correct values in content when rendered in the UI. The placeholders
- available are listed for each template.
-
- For more information and examples see [UI
- Visualization](/consul/docs/connect/observability/ui-visualization#configuring-dashboard-urls)
-
- The following named templates are defined:
-
- - `service` ((#ui_config_dashboard_url_templates_service)) - This is the URL
- to use when linking to the dashboard for a specific service. It is shown
- as part of the [Topology
- Visualization](/consul/docs/connect/observability/ui-visualization).
-
- The placeholders available are:
-
- - `{{Service.Name}}` - Replaced with the current service's name.
- - `{{Service.Namespace}}` - Replaced with the current service's namespace or empty if namespaces are not enabled.
- - `{{Service.Partition}}` - Replaced with the current service's admin
- partition or empty if admin partitions are not enabled.
- - `{{Datacenter}}` - Replaced with the current service's datacenter.
-
-- `ui_dir` - **This field is deprecated in Consul 1.9.0. See the [`ui_config.dir`](#ui_config_dir) field instead.**
- Equivalent to the [`-ui-dir`](/consul/docs/agent/config/cli-flags#_ui_dir) command-line
- flag. This configuration key is not required as of Consul version 0.7.0 and later.
- Specifying this configuration key will enable the web UI. There is no need to specify
- both ui-dir and ui. Specifying both will result in an error.
-
-## TLS Configuration Reference
-
-This section documents all of the configuration settings that apply to Agent TLS. Agent
-TLS is used by the HTTP API, internal RPC, and gRPC/xDS interfaces. Some of these settings
-may also be applied automatically by [auto_config](#auto_config) or [auto_encrypt](#auto_encrypt).
-
-~> **Security Note:** The Certificate Authority (CA) configured on the internal RPC interface
-(either explicitly by `tls.internal_rpc` or implicitly by `tls.defaults`) should be a private
-CA, not a public one. We recommend using a dedicated CA which should not be used with any other
-systems. Any certificate signed by the CA will be allowed to communicate with the cluster and a
-specially crafted certificate signed by the CA can be used to gain full access to Consul.
-
-- `tls` Added in Consul 1.12, for previous versions see
- [Deprecated Options](#tls_deprecated_options).
-
- - `defaults` ((#tls_defaults)) Provides default settings that will be applied
- to every interface unless explicitly overridden by `tls.grpc`, `tls.https`,
- or `tls.internal_rpc`.
-
- - `ca_file` ((#tls_defaults_ca_file)) This provides a file path to a
- PEM-encoded certificate authority. The certificate authority is used to
- check the authenticity of client and server connections with the
- appropriate [`verify_incoming`](#tls_defaults_verify_incoming) or
- [`verify_outgoing`](#tls_defaults_verify_outgoing) flags.
-
- - `ca_path` ((#tls_defaults_ca_path)) This provides a path to a directory
- of PEM-encoded certificate authority files. These certificate authorities
- are used to check the authenticity of client and server connections with
- the appropriate [`verify_incoming`](#tls_defaults_verify_incoming) or
- [`verify_outgoing`](#tls_defaults_verify_outgoing) flags.
-
- - `cert_file` ((#tls_defaults_cert_file)) This provides a file path to a
- PEM-encoded certificate. The certificate is provided to clients or servers
- to verify the agent's authenticity. It must be provided along with
- [`key_file`](#tls_defaults_key_file).
-
- - `key_file` ((#tls_defaults_key_file)) This provides the file path to a
- PEM-encoded private key. The key is used with the certificate to verify
- the agent's authenticity. This must be provided along with
- [`cert_file`](#tls_defaults_cert_file).
-
- - `tls_min_version` ((#tls_defaults_tls_min_version)) This specifies the
- minimum supported version of TLS. The following values are accepted:
- * `TLSv1_0`
- * `TLSv1_1`
- * `TLSv1_2` (default)
- * `TLSv1_3`
-
- - `verify_server_hostname` ((#tls_defaults_verify_server_hostname)) When
- set to true, Consul verifies the TLS certificate presented by the servers
- match the hostname `server.<datacenter>.<domain>`. By default this is false,
- and Consul does not verify the hostname of the certificate, only that it
- is signed by a trusted CA.
-
- **WARNING: TLS 1.1 and lower are generally considered less secure and
- should not be used if possible.**
-
- The following values are also valid, but only when using the
- [deprecated top-level `tls_min_version` config](#tls_deprecated_options),
- and will be removed in a future release:
-
- * `tls10`
- * `tls11`
- * `tls12`
- * `tls13`
-
- A warning message will appear if a deprecated value is specified.
-
- - `tls_cipher_suites` ((#tls_defaults_tls_cipher_suites)) This specifies
- the list of supported ciphersuites as a comma-separated-list. Applicable
- to TLS 1.2 and below only. The list of all ciphersuites supported by Consul is
- available in [the TLS configuration source code](https://github.com/hashicorp/consul/search?q=%22var+goTLSCipherSuites%22).
-
- ~> **Note:** The ordering of cipher suites will not be guaranteed from
- Consul 1.11 onwards. See this [post](https://go.dev/blog/tls-cipher-suites)
- for details.
-
- - `verify_incoming` - ((#tls_defaults_verify_incoming)) If set to true,
- Consul requires that all incoming connections make use of TLS and that
- the client provides a certificate signed by a Certificate Authority from
- the [`ca_file`](#tls_defaults_ca_file) or [`ca_path`](#tls_defaults_ca_path).
- By default, this is false, and Consul will not enforce the use of TLS or
- verify a client's authenticity.
-
- - `verify_outgoing` - ((#tls_defaults_verify_outgoing)) If set to true,
- Consul requires that all outgoing connections from this agent make use
- of TLS and that the server provides a certificate that is signed by a
- Certificate Authority from the [`ca_file`](#tls_defaults_ca_file) or
- [`ca_path`](#tls_defaults_ca_path). By default, this is false, and Consul
- will not make use of TLS for outgoing connections. This applies to clients
- and servers as both will make outgoing connections. This setting does not
- apply to the gRPC interface as Consul makes no outgoing connections on this
- interface. When set to true for the HTTPS interface, this parameter applies to [watches](/consul/docs/dynamic-app-config/watches), which operate by making HTTPS requests to the local agent.
-
- - `grpc` ((#tls_grpc)) Provides settings for the gRPC/xDS interface. To enable
- the gRPC interface you must define a port via [`ports.grpc_tls`](#grpc_tls_port).
-
- - `ca_file` ((#tls_grpc_ca_file)) Overrides [`tls.defaults.ca_file`](#tls_defaults_ca_file).
-
- - `ca_path` ((#tls_grpc_ca_path)) Overrides [`tls.defaults.ca_path`](#tls_defaults_ca_path).
-
- - `cert_file` ((#tls_grpc_cert_file)) Overrides [`tls.defaults.cert_file`](#tls_defaults_cert_file).
-
- - `key_file` ((#tls_grpc_key_file)) Overrides [`tls.defaults.key_file`](#tls_defaults_key_file).
-
- - `tls_min_version` ((#tls_grpc_tls_min_version)) Overrides [`tls.defaults.tls_min_version`](#tls_defaults_tls_min_version).
-
- - `tls_cipher_suites` ((#tls_grpc_tls_cipher_suites)) Overrides [`tls.defaults.tls_cipher_suites`](#tls_defaults_tls_cipher_suites).
-
- - `verify_incoming` - ((#tls_grpc_verify_incoming)) Overrides [`tls.defaults.verify_incoming`](#tls_defaults_verify_incoming).
-
- - `use_auto_cert` - (Defaults to `false`) Enables or disables TLS on gRPC servers. Set to `true` to allow `auto_encrypt` TLS settings to apply to gRPC listeners. We recommend disabling TLS on gRPC servers if you are using `auto_encrypt` for other TLS purposes, such as enabling HTTPS.
-
- - `https` ((#tls_https)) Provides settings for the HTTPS interface. To enable
- the HTTPS interface you must define a port via [`ports.https`](#https_port).
-
- - `ca_file` ((#tls_https_ca_file)) Overrides [`tls.defaults.ca_file`](#tls_defaults_ca_file).
-
- - `ca_path` ((#tls_https_ca_path)) Overrides [`tls.defaults.ca_path`](#tls_defaults_ca_path).
-
- - `cert_file` ((#tls_https_cert_file)) Overrides [`tls.defaults.cert_file`](#tls_defaults_cert_file).
-
- - `key_file` ((#tls_https_key_file)) Overrides [`tls.defaults.key_file`](#tls_defaults_key_file).
-
- - `tls_min_version` ((#tls_https_tls_min_version)) Overrides [`tls.defaults.tls_min_version`](#tls_defaults_tls_min_version).
-
- - `tls_cipher_suites` ((#tls_https_tls_cipher_suites)) Overrides [`tls.defaults.tls_cipher_suites`](#tls_defaults_tls_cipher_suites).
-
- - `verify_incoming` - ((#tls_https_verify_incoming)) Overrides [`tls.defaults.verify_incoming`](#tls_defaults_verify_incoming).
-
- - `verify_outgoing` - ((#tls_https_verify_outgoing)) Overrides [`tls.defaults.verify_outgoing`](#tls_defaults_verify_outgoing).
-
- - `internal_rpc` ((#tls_internal_rpc)) Provides settings for the internal
- "server" RPC interface configured by [`ports.server`](#server_rpc_port).
-
- - `ca_file` ((#tls_internal_rpc_ca_file)) Overrides [`tls.defaults.ca_file`](#tls_defaults_ca_file).
-
- - `ca_path` ((#tls_internal_rpc_ca_path)) Overrides [`tls.defaults.ca_path`](#tls_defaults_ca_path).
-
- - `cert_file` ((#tls_internal_rpc_cert_file)) Overrides [`tls.defaults.cert_file`](#tls_defaults_cert_file).
-
- - `key_file` ((#tls_internal_rpc_key_file)) Overrides [`tls.defaults.key_file`](#tls_defaults_key_file).
-
- - `tls_min_version` ((#tls_internal_rpc_tls_min_version)) Overrides [`tls.defaults.tls_min_version`](#tls_defaults_tls_min_version).
-
- - `tls_cipher_suites` ((#tls_internal_rpc_tls_cipher_suites)) Overrides [`tls.defaults.tls_cipher_suites`](#tls_defaults_tls_cipher_suites).
-
- - `verify_incoming` - ((#tls_internal_rpc_verify_incoming)) Overrides [`tls.defaults.verify_incoming`](#tls_defaults_verify_incoming).
-
- ~> **Security Note:** `verify_incoming` *must* be set to true to prevent
- anyone with access to the internal RPC port from gaining full access to
- the Consul cluster.
-
- - `verify_outgoing` ((#tls_internal_rpc_verify_outgoing)) Overrides [`tls.defaults.verify_outgoing`](#tls_defaults_verify_outgoing).
-
- ~> **Security Note:** Servers that specify `verify_outgoing = true` will
- always talk to other servers over TLS, but they still _accept_ non-TLS
- connections to allow for a transition of all clients to TLS. Currently the
- only way to enforce that no client can communicate with a server unencrypted
- is to also enable `verify_incoming` which requires client certificates too.
-
- - `verify_server_hostname` Overrides [tls.defaults.verify_server_hostname](#tls_defaults_verify_server_hostname). When
- set to true, Consul verifies the TLS certificate presented by the servers
- match the hostname `server.<datacenter>.<domain>`. By default this is false,
- and Consul does not verify the hostname of the certificate, only that it
- is signed by a trusted CA.
-
- ~> **Security Note:** `verify_server_hostname` *must* be set to true to prevent a
- compromised client from gaining full read and write access to all cluster
- data *including all ACL tokens and service mesh CA root keys*.
-
-- `server_name` When provided, this overrides the [`node_name`](#_node)
- for the TLS certificate. It can be used to ensure that the certificate name matches
- the hostname we declare.
-
-### Deprecated Options ((#tls_deprecated_options))
-
-The following options were deprecated in Consul 1.12, please use the
-[`tls`](#tls-1) stanza instead.
-
-- `ca_file` See: [`tls.defaults.ca_file`](#tls_defaults_ca_file).
-
-- `ca_path` See: [`tls.defaults.ca_path`](#tls_defaults_ca_path).
-
-- `cert_file` See: [`tls.defaults.cert_file`](#tls_defaults_cert_file).
-
-- `key_file` See: [`tls.defaults.key_file`](#tls_defaults_key_file).
-
-- `tls_min_version` Added in Consul 0.7.4.
- See: [`tls.defaults.tls_min_version`](#tls_defaults_tls_min_version).
-
-- `tls_cipher_suites` Added in Consul 0.8.2.
- See: [`tls.defaults.tls_cipher_suites`](#tls_defaults_tls_cipher_suites).
-
-- `tls_prefer_server_cipher_suites` Added in Consul 0.8.2. This setting will
- be ignored (see [this post](https://go.dev/blog/tls-cipher-suites) for details).
-
-- `verify_incoming` See: [`tls.defaults.verify_incoming`](#tls_defaults_verify_incoming).
-
-- `verify_incoming_rpc` See: [`tls.internal_rpc.verify_incoming`](#tls_internal_rpc_verify_incoming).
-
-- `verify_incoming_https` See: [`tls.https.verify_incoming`](#tls_https_verify_incoming).
-
-- `verify_outgoing` See: [`tls.defaults.verify_outgoing`](#tls_defaults_verify_outgoing).
-
-- `verify_server_hostname` See: [`tls.internal_rpc.verify_server_hostname`](#tls_internal_rpc_verify_server_hostname).
-
-### Example Configuration File, with TLS
-
-~> **Security Note:** all three verify options should be set as `true` to enable
-secure mTLS communication, enabling both encryption and authentication. Failing
-to set [`verify_incoming`](#tls_defaults_verify_incoming) or
-[`verify_outgoing`](#tls_defaults_verify_outgoing) either in the
-interface-specific stanza (e.g. `tls.internal_rpc`, `tls.https`) or in
-`tls.defaults` will result in TLS not being enabled at all, even when specifying
-a [`ca_file`](#tls_defaults_ca_file), [`cert_file`](#tls_defaults_cert_file),
-and [`key_file`](#tls_defaults_key_file).
-
-See, especially, the use of the `ports` setting highlighted below.
-
-
-
-
-
-```hcl
-datacenter = "east-aws"
-data_dir = "/opt/consul"
-log_level = "INFO"
-node_name = "foobar"
-server = true
-
-addresses = {
- https = "0.0.0.0"
-}
-ports {
- https = 8501
-}
-
-tls {
- defaults {
- key_file = "/etc/pki/tls/private/my.key"
- cert_file = "/etc/pki/tls/certs/my.crt"
- ca_file = "/etc/pki/tls/certs/ca-bundle.crt"
- verify_incoming = true
- verify_outgoing = true
- verify_server_hostname = true
- }
-}
-```
-
-
-
-
-
-```json
-{
- "datacenter": "east-aws",
- "data_dir": "/opt/consul",
- "log_level": "INFO",
- "node_name": "foobar",
- "server": true,
- "addresses": {
- "https": "0.0.0.0"
- },
- "ports": {
- "https": 8501
- },
- "tls": {
- "defaults": {
- "key_file": "/etc/pki/tls/private/my.key",
- "cert_file": "/etc/pki/tls/certs/my.crt",
- "ca_file": "/etc/pki/tls/certs/ca-bundle.crt",
- "verify_incoming": true,
- "verify_outgoing": true,
- "verify_server_hostname": true
- }
- }
-}
-```
-
-
-
-
-
-Consul will not enable TLS for the HTTP or gRPC API unless the `https` port has
-been assigned a port number `> 0`. We recommend using `8501` for `https` as this
-default will automatically work with some tooling.
-
-## xDS Server Parameters
-
-- `xds`: This object allows you to configure the behavior of Consul's
-[xDS protocol](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol)
-server.
-
- - `update_max_per_second`: Specifies the number of proxy configuration updates across all connected xDS streams that are allowed per second. This configuration prevents updates to global resources, such as wildcard intentions, from consuming system resources at the expense of other processes, such as Raft and Gossip, which could cause general cluster instability.
-
- The default value is `250`. It is based on a load test of 5,000 streams connected to a single server with two CPU cores.
-
- If necessary, you can lower or increase the limit without a rolling restart by using the `consul reload` command or by sending the server a `SIGHUP`.
diff --git a/website/content/docs/agent/config/index.mdx b/website/content/docs/agent/config/index.mdx
deleted file mode 100644
index c620ef72e05a..000000000000
--- a/website/content/docs/agent/config/index.mdx
+++ /dev/null
@@ -1,95 +0,0 @@
----
-layout: docs
-page_title: Agents - Configuration Explained
-description: >-
- Agent configuration is the process of defining server and client agent properties with CLI flags and configuration files. Learn what properties can be configured on reload and how Consul sets precedence for configuration settings.
----
-
-# Agent Configuration
-
-The agent has various configuration options that can be specified via
-the command-line or via configuration files. All of the configuration
-options are completely optional. Defaults are specified with their
-descriptions.
-
-Configuration precedence is evaluated in the following order:
-
-1. [Command line arguments](/consul/docs/agent/config/cli-flags)
-2. [Configuration files](/consul/docs/agent/config/config-files)
-
-When loading configuration, the Consul agent loads the configuration from files and
-directories in lexical order. For example, configuration file
-`basic_config.json` will be processed before `extra_config.json`. Configuration
-can be in either [HCL](https://github.com/hashicorp/hcl#syntax) or JSON format.
-Available in Consul 1.0 and later, the HCL support now requires an `.hcl` or
-`.json` extension on all configuration files in order to specify their format.
-
-Configuration specified later will be merged into configuration specified
-earlier. In most cases, "merge" means that the later version will override the
-earlier. In some cases, such as event handlers, merging appends the handlers to
-the existing configuration. The exact merging behavior is specified for each
-option below.
-
-The Consul agent also supports reloading configuration when it receives the
-SIGHUP signal. Not all changes are respected; the changes that are
-respected are documented below in the
-[Reloadable Configuration](#reloadable-configuration) section. The
-[reload command](/consul/commands/reload) can also be used to trigger a
-configuration reload.
-
-You can test the following configuration options by following the
-[Get Started](/consul/tutorials/get-started-vms?utm_source=docs)
-tutorials to install an agent in a VM.
-
-## Ports Used
-
-Consul requires up to 6 different ports to work properly, some on
-TCP, UDP, or both protocols.
-
-Review the [required ports](/consul/docs/install/ports) table for a list of
-required ports and their default settings.
-
-## Reloadable Configuration
-
-Some agent configuration options are reloadable at runtime.
-You can run the [`consul reload` command](/consul/commands/reload) to manually reload supported options from configuration files in the configuration directory.
-To configure the agent to automatically reload configuration files updated on disk,
-set the [`auto_reload_config` configuration option](/consul/docs/agent/config/config-files#auto_reload_config) parameter to `true`.
-
-The following agent configuration options are reloadable at runtime:
-- ACL Tokens
-- [Configuration Entry Bootstrap](/consul/docs/agent/config/config-files#config_entries_bootstrap)
-- Checks
-- [Discard Check Output](/consul/docs/agent/config/config-files#discard_check_output)
-- HTTP Client Address
-- Log level
-- [Metric Prefix Filter](/consul/docs/agent/config/config-files#telemetry-prefix_filter)
-- [Node Metadata](/consul/docs/agent/config/config-files#node_meta)
-- Some Raft options (since Consul 1.10.0)
- - [`raft_snapshot_threshold`](/consul/docs/agent/config/config-files#_raft_snapshot_threshold)
- - [`raft_snapshot_interval`](/consul/docs/agent/config/config-files#_raft_snapshot_interval)
- - [`raft_trailing_logs`](/consul/docs/agent/config/config-files#_raft_trailing_logs)
- - These can be important in certain outage situations so being able to control
- them without a restart provides a recovery path that doesn't involve
- downtime. They generally shouldn't be changed otherwise.
-- [RPC rate limits](/consul/docs/agent/config/config-files#limits)
-- [Reporting](/consul/docs/agent/config/config-files#reporting)
-- [HTTP Maximum Connections per Client](/consul/docs/agent/config/config-files#http_max_conns_per_client)
-- Services
-- TLS Configuration
- - Please be aware that this is currently limited to reload a configuration that is already TLS enabled. You cannot enable or disable TLS only with reloading.
- - To avoid a potential security issue, the following TLS configuration parameters do not automatically reload when [-auto-reload-config](/consul/docs/agent/config/cli-flags#_auto_reload_config) is enabled:
- - [encrypt_verify_incoming](/consul/docs/agent/config/config-files#encrypt_verify_incoming)
- - [verify_incoming](/consul/docs/agent/config/config-files#verify_incoming)
- - [verify_incoming_rpc](/consul/docs/agent/config/config-files#verify_incoming_rpc)
- - [verify_incoming_https](/consul/docs/agent/config/config-files#verify_incoming_https)
- - [verify_outgoing](/consul/docs/agent/config/config-files#verify_outgoing)
- - [verify_server_hostname](/consul/docs/agent/config/config-files#verify_server_hostname)
- - [ca_file](/consul/docs/agent/config/config-files#ca_file)
- - [ca_path](/consul/docs/agent/config/config-files#ca_path)
-
- If any of those configurations are changed while [-auto-reload-config](/consul/docs/agent/config/cli-flags#_auto_reload_config) is enabled,
- Consul will issue the following warning, `Static Runtime config has changed and need a manual config reload to be applied`.
- You must manually issue the `consul reload` command or send a `SIGHUP` to the Consul process to reload the new values.
-- Watches
-- [License](/consul/docs/enterprise/license/overview)
diff --git a/website/content/docs/agent/index.mdx b/website/content/docs/agent/index.mdx
deleted file mode 100644
index 468e9087c2ae..000000000000
--- a/website/content/docs/agent/index.mdx
+++ /dev/null
@@ -1,464 +0,0 @@
----
-layout: docs
-page_title: Agents Overview
-description: >-
- Agents maintain register services, respond to queries, maintain datacenter membership information, and make most of Consul’s functions possible. Learn how to start, stop, and configure agents, as well as their requirements and lifecycle.
----
-
-# Agents Overview
-
-This topic provides an overview of the Consul agent, which is the core process of Consul.
-The agent maintains membership information, registers services, runs checks, responds to queries, and more.
-The agent must run on every node that is part of a Consul cluster.
-
-Agents run in either client or server mode. Client nodes are lightweight processes that make up the majority of the cluster.
-They interface with the server nodes for most operations and maintain very little state of their own.
-Clients run on every node where services are running.
-
-In addition to the core agent operations, server nodes participate in the [consensus quorum](/consul/docs/architecture/consensus).
-The quorum is based on the Raft protocol, which provides strong consistency and availability in the case of failure.
-Server nodes should run on dedicated instances because they are more resource intensive than client nodes.
-
-## Lifecycle
-
-Every agent in the Consul cluster goes through a lifecycle.
-Understanding the lifecycle is useful for building a mental model of an agent's interactions with a cluster and how the cluster treats a node.
-The following process describes the agent lifecycle within the context of an existing cluster:
-
-1. **An agent is started** either manually or through an automated or programmatic process.
- Newly-started agents are unaware of other nodes in the cluster.
-1. **An agent joins a cluster**, which enables the agent to discover agent peers.
- Agents join clusters on startup when the [`join`](/consul/commands/join) command is issued or according to the [auto-join configuration](/consul/docs/install/cloud-auto-join).
-1. **Information about the agent is gossiped to the entire cluster**.
- As a result, all nodes will eventually become aware of each other.
-1. **Existing servers will begin replicating to the new node** if the agent is a server.
-
-### Failures and crashes
-
-In the event of a network failure, some nodes may be unable to reach other nodes.
-Unreachable nodes will be marked as _failed_.
-
-Distinguishing between a network failure and an agent crash is impossible.
-As a result, agent crashes are handled in the same manner as network failures.
-
-Once a node is marked as failed, this information is updated in the service
-catalog.
-
--> **Note:** Updating the catalog is only possible if the servers can still [form a quorum](/consul/docs/architecture/consensus).
-Once the network recovers or a crashed agent restarts, the cluster will repair itself and unmark a node as failed.
-The health check in the catalog will also be updated to reflect the current state.
-
-### Exiting nodes
-
-When a node leaves a cluster, it communicates its intent and the cluster marks the node as having _left_.
-In contrast to changes related to failures, all of the services provided by a node are immediately deregistered.
-If a server agent leaves, replication to the exiting server will stop.
-
-To prevent an accumulation of dead nodes (nodes in either _failed_ or _left_
-states), Consul will automatically remove dead nodes out of the catalog. This
-process is called _reaping_. This is currently done on a configurable
-interval of 72 hours (changing the reap interval is _not_ recommended due to
-its consequences during outage situations). Reaping is similar to leaving,
-causing all associated services to be deregistered.
-
-## Limit traffic rates
-You can define a set of rate limiting configurations that help operators protect Consul servers from excessive or peak usage. The configurations enable you to gracefully degrade Consul servers to avoid a global interruption of service. Consul supports global server rate limiting, which lets you configure Consul servers to deny requests that exceed the read or write limits. Refer to [Traffic Rate Limits Overview](/consul/docs/agent/limits).
-
-## Requirements
-
-You should run one Consul agent per server or host.
-Instances of Consul can run in separate VMs or as separate containers.
-At least one server agent per Consul deployment is required, but three to five server agents are recommended.
-Refer to the following sections for information about host, port, memory, and other requirements:
-
-- [Server Performance](/consul/docs/install/performance)
-- [Required Ports](/consul/docs/install/ports)
-
-The [Datacenter Deploy tutorial](/consul/tutorials/production-deploy/reference-architecture#deployment-system-requirements) contains additional information, including licensing configuration, environment variables, and other details.
-
-### Maximum latency network requirements
-
-Consul uses the gossip protocol to share information across agents. To function properly, you cannot exceed the protocol's maximum latency threshold. The latency threshold is calculated according to the total round trip time (RTT) for communication between all agents. Other network usages outside of Gossip are not bound by these latency requirements (i.e. client to server RPCs, HTTP API requests, xDS proxy configuration, DNS).
-
-For data sent between all Consul agents the following latency requirements must be met:
-
-- Average RTT for all traffic cannot exceed 50ms.
-- RTT for 99 percent of traffic cannot exceed 100ms.
-
-## Starting the Consul agent
-
-Start a Consul agent with the `consul` command and `agent` subcommand using the following syntax:
-
-```shell-session
-$ consul agent
-```
-
-Consul ships with a `-dev` flag that configures the agent to run in server mode and several additional settings that enable you to quickly get started with Consul.
-The `-dev` flag is provided for learning purposes only.
-We strongly advise against using it for production environments.
-
--> **Getting Started Tutorials**: You can test a local agent in a VM by following the
-[Get Started tutorials](/consul/tutorials/get-started-vms?utm_source=docs).
-
-When starting Consul with the `-dev` flag, the only additional information Consul needs to run is the location of a directory for storing agent state data.
-You can specify the location with the `-data-dir` flag or define the location in an external file and point the file with the `-config-file` flag.
-
-You can also point to a directory containing several configuration files with the `-config-dir` flag.
-This enables you to logically group configuration settings into separate files. See [Configuring Consul Agents](/consul/docs/agent#configuring-consul-agents) for additional information.
-
-The following example starts an agent in dev mode and stores agent state data in the `tmp/consul` directory:
-
-```shell-session
-$ consul agent -data-dir=tmp/consul -dev
-```
-
-Agents are highly configurable, which enables you to deploy Consul to any infrastructure. Many of the default options for the `agent` command are suitable for becoming familiar with a local instance of Consul. In practice, however, several additional configuration options must be specified for Consul to function as expected. Refer to [Agent Configuration](/consul/docs/agent/config) topic for a complete list of configuration options.
-
-### Understanding the agent startup output
-
-Consul prints several important messages on startup.
-The following example shows output from the [`consul agent`](/consul/commands/agent) command:
-
-```shell-session
-$ consul agent -data-dir=/tmp/consul
-==> Starting Consul agent...
-==> Consul agent running!
- Node name: 'Armons-MacBook-Air'
- Datacenter: 'dc1'
- Server: false (bootstrap: false)
- Client Addr: 127.0.0.1 (HTTP: 8500, DNS: 8600)
- Cluster Addr: 192.168.1.43 (LAN: 8301, WAN: 8302)
-
-==> Log data will now stream in as it occurs:
-
- [INFO] serf: EventMemberJoin: Armons-MacBook-Air.local 192.168.1.43
-...
-```
-
-- **Node name**: This is a unique name for the agent. By default, this
- is the hostname of the machine, but you may customize it using the
- [`-node`](/consul/docs/agent/config/cli-flags#_node) flag.
-
-- **Datacenter**: This is the datacenter in which the agent is configured to
- run. For single-DC configurations, the agent will default to `dc1`, but you can configure which datacenter the agent reports to with the [`-datacenter`](/consul/docs/agent/config/cli-flags#_datacenter) flag.
- Consul has first-class support for multiple datacenters, but configuring each node to report its datacenter improves agent efficiency.
-
-- **Server**: This indicates whether the agent is running in server or client
- mode.
- Running an agent in server mode requires additional overhead. This is because they participate in the consensus quorum, store cluster state, and handle queries. A server may also be
- in ["bootstrap"](/consul/docs/agent/config/cli-flags#_bootstrap_expect) mode, which enables the server to elect itself as the Raft leader. Multiple servers cannot be in bootstrap mode because it would put the cluster in an inconsistent state.
-
-- **Client Addr**: This is the address used for client interfaces to the agent.
- This includes the ports for the HTTP and DNS interfaces. By default, this
- binds only to localhost. If you change this address or port, you'll have to
- specify a `-http-addr` whenever you run commands such as
- [`consul members`](/consul/commands/members) to indicate how to reach the
- agent. Other applications can also use the HTTP address and port
- [to control Consul](/consul/api-docs).
-
-- **Cluster Addr**: This is the address and set of ports used for communication
- between Consul agents in a cluster. Not all Consul agents in a cluster have to
- use the same port, but this address **MUST** be reachable by all other nodes.
-
-When running under `systemd` on Linux, Consul notifies systemd by sending
-`READY=1` to the `$NOTIFY_SOCKET` when a LAN join has completed. For
-this either the `join` or `retry_join` option has to be set and the
-service definition file has to have `Type=notify` set.
-
-## Configuring Consul agents
-
-You can specify many options to configure how Consul operates when issuing the `consul agent` command.
-You can also create one or more configuration files and provide them to Consul at startup using either the `-config-file` or `-config-dir` option.
-Configuration files must be written in either JSON or HCL format.
-
--> **Consul Terminology**: Configuration files are sometimes called "service definition" files when they are used to configure client agents.
-This is because clients are most commonly used to register services in the Consul catalog.
-
-The following example starts a Consul agent that takes configuration settings from a file called `server.json` located in the current working directory:
-
-```shell-session hideClipboard
-$ consul agent -config-file=server.json
-```
-
-The configuration options necessary to successfully use Consul depend on several factors, including the type of agent you are configuring (client or server), the type of environment you are deploying to (e.g., on-premise, multi-cloud, etc.), and the security options you want to implement (ACLs, gRPC encryption).
-The following examples are intended to help you understand some of the combinations you can implement to configure Consul.
-
-### Common configuration settings
-
-The following settings are commonly used in the configuration file (also called a service definition file when registering services with Consul) to configure Consul agents:
-
-| Parameter | Description | Default |
-| ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
-| `node_name`  | String value that specifies a name for the agent node. See [`-node`](/consul/docs/agent/config/cli-flags#_node) for details.                                                                                                                                        | Hostname of the machine                                                                                           |
-| `server` | Boolean value that determines if the agent runs in server mode. See [`-server`](/consul/docs/agent/config/cli-flags#_server) for details. | `false` |
-| `datacenter` | String value that specifies which datacenter the agent runs in. See [-datacenter](/consul/docs/agent/config/cli-flags#_datacenter) for details. | `dc1` |
-| `data_dir` | String value that specifies a directory for storing agent state data. See [`-data-dir`](/consul/docs/agent/config/cli-flags#_data_dir) for details. | none |
-| `log_level` | String value that specifies the level of logging the agent reports. See [`-log-level`](/consul/docs/agent/config/cli-flags#_log_level) for details. | `info` |
-| `retry_join` | Array of string values that specify one or more agent addresses to join after startup. The agent will continue trying to join the specified agents until it has successfully joined another member. See [`-retry-join`](/consul/docs/agent/config/cli-flags#_retry_join) for details. | none |
-| `addresses` | Block of nested objects that define addresses bound to the agent for internal cluster communication. | `"http": "0.0.0.0"` See the Agent Configuration page for [default address values](/consul/docs/agent/config/config-files#addresses) |
-| `ports`      | Block of nested objects that define ports bound to agent addresses. See the [`addresses`](/consul/docs/agent/config/config-files#addresses) option for details.                                                                                                              | See the Agent Configuration page for [default port values](/consul/docs/agent/config/config-files#ports)          |
-
-### Server node in a service mesh
-
-The following example configuration is for a server agent named "`consul-server`". The server is [bootstrapped](/consul/docs/agent/config/cli-flags#_bootstrap) and the Consul GUI is enabled.
-The reason this server agent is configured for a service mesh is that the `connect` configuration is enabled. The connect subsystem provides Consul's service mesh capabilities, including service-to-service connection authorization and encryption using mutual Transport Layer Security (TLS). Applications can use sidecar proxies in a service mesh configuration to establish TLS connections for inbound and outbound connections without being aware of Consul service mesh at all. Refer to [Consul Service Mesh](/consul/docs/connect) for details.
-
-
-
-```hcl
-node_name = "consul-server"
-server = true
-bootstrap = true
-ui_config {
- enabled = true
-}
-datacenter = "dc1"
-data_dir = "consul/data"
-log_level = "INFO"
-addresses {
- http = "0.0.0.0"
-}
-connect {
- enabled = true
-}
-```
-
-
-```json
-{
- "node_name": "consul-server",
- "server": true,
- "bootstrap": true,
- "ui_config": {
- "enabled": true
- },
- "datacenter": "dc1",
- "data_dir": "consul/data",
- "log_level": "INFO",
- "addresses": {
- "http": "0.0.0.0"
- },
- "connect": {
- "enabled": true
- }
-}
-```
-
-
-
-### Server node with encryption enabled
-
-The following example shows a server node configured with encryption enabled.
-Refer to the [Security](/consul/docs/security) chapter for additional information about how to configure security options for Consul.
-
-
-
-```hcl
-node_name = "consul-server"
-server = true
-ui_config {
- enabled = true
-}
-data_dir = "consul/data"
-addresses {
- http = "0.0.0.0"
-}
-retry_join = [
- "consul-server2",
- "consul-server3"
-]
-encrypt = "aPuGh+5UDskRAbkLaXRzFoSOcSM+5vAK+NEYOWHJH7w="
-
-tls {
- defaults {
- verify_incoming = true
- verify_outgoing = true
- ca_file = "/consul/config/certs/consul-agent-ca.pem"
- cert_file = "/consul/config/certs/dc1-server-consul-0.pem"
- key_file = "/consul/config/certs/dc1-server-consul-0-key.pem"
- verify_server_hostname = true
- }
-}
-
-```
-
-
-```json
-{
- "node_name": "consul-server",
- "server": true,
- "ui_config": {
- "enabled": true
- },
- "data_dir": "consul/data",
- "addresses": {
- "http": "0.0.0.0"
- },
-  "retry_join": ["consul-server2", "consul-server3"],
- "encrypt": "aPuGh+5UDskRAbkLaXRzFoSOcSM+5vAK+NEYOWHJH7w=",
- "tls": {
- "defaults": {
- "verify_incoming": true,
- "verify_outgoing": true,
- "ca_file": "/consul/config/certs/consul-agent-ca.pem",
- "cert_file": "/consul/config/certs/dc1-server-consul-0.pem",
- "key_file": "/consul/config/certs/dc1-server-consul-0-key.pem"
- },
- "internal_rpc": {
- "verify_server_hostname": true
- }
- }
-}
-```
-
-
-
-### Client node registering a service
-
-Using Consul as a central service registry is a common use case.
-The following example configuration includes common settings to register a service with a Consul agent and enable health checks. Refer to [Define Health Checks](/consul/docs/services/usage/checks) to learn more about health checks.
-
-
-
-```hcl
-node_name = "consul-client"
-server = false
-datacenter = "dc1"
-data_dir = "consul/data"
-log_level = "INFO"
-retry_join = ["consul-server"]
-service {
- id = "dns"
- name = "dns"
- tags = ["primary"]
- address = "localhost"
- port = 8600
- check {
- id = "dns"
- name = "Consul DNS TCP on port 8600"
- tcp = "localhost:8600"
- interval = "10s"
- timeout = "1s"
- }
-}
-
-```
-
-```json
-{
- "node_name": "consul-client",
- "server": false,
- "datacenter": "dc1",
- "data_dir": "consul/data",
- "log_level": "INFO",
- "retry_join": ["consul-server"],
- "service": {
- "id": "dns",
- "name": "dns",
- "tags": ["primary"],
- "address": "localhost",
- "port": 8600,
- "check": {
- "id": "dns",
- "name": "Consul DNS TCP on port 8600",
- "tcp": "localhost:8600",
- "interval": "10s",
- "timeout": "1s"
- }
- }
-}
-```
-
-
-
-## Client node with multiple interfaces or IP addresses
-
-The following example shows how to configure Consul to listen on multiple interfaces or IP addresses using a [go-sockaddr template].
-
-The `bind_addr` is used for internal RPC and Serf communication ([read the Agent Configuration for more information](/consul/docs/agent/config/config-files#bind_addr)).
-
-The `client_addr` configuration specifies IP addresses used for HTTP, HTTPS, DNS and gRPC servers. ([read the Agent Configuration for more information](/consul/docs/agent/config/config-files#client_addr)).
-
-
-
-```hcl
-node_name = "consul-client"
-server = false
-bootstrap = true
-ui_config {
- enabled = true
-}
-datacenter = "dc1"
-data_dir = "consul/data"
-log_level = "INFO"
-
-# used for internal RPC and Serf
-bind_addr = "0.0.0.0"
-
-# Used for HTTP, HTTPS, DNS, and gRPC addresses.
-# loopback is not included in GetPrivateInterfaces because it is not routable.
-client_addr = "{{ GetPrivateInterfaces | exclude \"type\" \"ipv6\" | join \"address\" \" \" }} {{ GetAllInterfaces | include \"flags\" \"loopback\" | join \"address\" \" \" }}"
-
-# advertises gossip and RPC interface to other nodes
-advertise_addr = "{{ GetInterfaceIP \"en0\" }}"
-```
-
-```json
-{
- "node_name": "consul-client",
- "server": false,
- "bootstrap": true,
- "ui_config": {
- "enabled": true
- },
- "datacenter": "dc1",
- "data_dir": "consul/data",
- "log_level": "INFO",
- "bind_addr": "{{ GetPrivateIP }}",
- "client_addr": "{{ GetPrivateInterfaces | exclude \"type\" \"ipv6\" | join \"address\" \" \" }} {{ GetAllInterfaces | include \"flags\" \"loopback\" | join \"address\" \" \" }}",
- "advertise_addr": "{{ GetInterfaceIP \"en0\"}}"
-}
-```
-
-
-
-## Stopping an agent
-
-An agent can be stopped in two ways: gracefully or forcefully. Servers and
-Clients both behave differently depending on the leave that is performed. There
-are two potential states a process can be in after a system signal is sent:
-_left_ and _failed_.
-
-To gracefully halt an agent, send the process an _interrupt signal_ (usually
-`Ctrl-C` from a terminal, or running `kill -INT consul_pid` ). For more
-information on different signals sent by the `kill` command, see
-[here](https://www.linux.org/threads/kill-signals-and-commands-revised.11625/)
-
-When a Client is gracefully exited, the agent first notifies the cluster that
-it intends to leave. This way, other cluster members record that the node has
-_left_.
-
-When a Server is gracefully exited, the server will not be marked as _left_.
-This is to minimally impact the consensus quorum. Instead, the Server will be
-marked as _failed_. To remove a server from the cluster, the
-[`force-leave`](/consul/commands/force-leave) command is used. Using
-`force-leave` will put the server instance in a _left_ state so long as the
-Server agent is not alive.
-
-Alternatively, you can forcibly stop an agent by sending it a
-`kill -KILL consul_pid` signal. This will stop any agent immediately. The rest
-of the cluster will eventually (usually within seconds) detect that the node has
-died and notify the cluster that the node has _failed_.
-
-For client agents, the difference between a node _failing_ and a node _leaving_
-may not be important for your use case. For example, for a web server and load
-balancer setup, both result in the same outcome: the web node is removed
-from the load balancer pool.
-
-The [`skip_leave_on_interrupt`](/consul/docs/agent/config/config-files#skip_leave_on_interrupt) and
-[`leave_on_terminate`](/consul/docs/agent/config/config-files#leave_on_terminate) configuration
-options allow you to adjust this behavior.
-
-
-
-[go-sockaddr template]: https://godoc.org/github.com/hashicorp/go-sockaddr/template
diff --git a/website/content/docs/agent/limits/index.mdx b/website/content/docs/agent/limits/index.mdx
deleted file mode 100644
index ecf6deac49bd..000000000000
--- a/website/content/docs/agent/limits/index.mdx
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: docs
-page_title: Limit Traffic Rates Overview
-description: Rate limiting is a set of Consul server agent configurations that you can use to mitigate the risks to Consul servers when clients send excessive requests to Consul resources.
----
-
-# Traffic rate limiting overview
-
-
-This topic provides overview information about the traffic rates limits you can configure for Consul datacenters.
-
-## Introduction
-
-Configuring rate limits on RPC and gRPC traffic mitigates the risks to Consul servers when client agents or services send excessive read or write requests to Consul resources. A _read_ request is defined as any request that does not modify Consul internal state. A _write_ request is defined as any request that modifies Consul internal state. Configure read and write request limits independently.
-
-## Workflow
-
-You can set global limits on the rate of read and write requests that affect individual servers in the datacenter. You can set limits for all source IP addresses, which enables you to specify a budget for read and write requests to prevent any single source IP from overwhelming the Consul server and negatively affecting the network. The following steps describe the general process for setting global read and write rate limits:
-
-1. Set arbitrary limits to begin understanding the upper boundary of RPC and gRPC loads in your network. Refer to [Initialize rate limit settings](/consul/docs/agent/limits/usage/init-rate-limits) for additional information.
-
-1. Monitor the metrics and logs and readjust the initial configurations as necessary. Refer to [Monitor rate limit data](/consul/docs/agent/limits/usage/monitor-rate-limits)
-
-1. Define your final operational limits based on your observations. If you are defining global rate limits, refer to [Set global traffic rate limits](/consul/docs/agent/limits/usage/set-global-traffic-rate-limits) for additional information. For information about setting limits per source IP address, refer to [Limit traffic rates for a source IP](/consul/docs/agent/limits/usage/limit-request-rates-from-ips).
-
-
-Setting limits per source IP requires Consul Enterprise.
-
-
-### Order of operations
-
-You can define request rate limits in the agent configuration and in the control plane request limit configuration entry. The configuration entry also supports rate limit configurations for Consul resources. Consul performs the following order of operations when determining request rate limits:
-
-
-
-
-
-
-## Kubernetes
-
-To define global rate limits, configure the `request_limits` settings in the Consul Helm chart. Refer to the [Helm chart reference](/consul/docs/k8s/helm) for additional information. Refer to the [control plane request limit configuration entry reference](/consul/docs/connect/config-entries/control-plane-request-limit) for information about applying a CRD for limiting traffic rates from source IPs.
diff --git a/website/content/docs/agent/limits/usage/init-rate-limits.mdx b/website/content/docs/agent/limits/usage/init-rate-limits.mdx
deleted file mode 100644
index 1c84ca4f6e58..000000000000
--- a/website/content/docs/agent/limits/usage/init-rate-limits.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
----
-layout: docs
-page_title: Initialize rate limit settings
-description: Learn how to determine regular and peak loads in your network so that you can set the initial global rate limit configurations.
----
-
-# Initialize rate limit settings
-
-Because each network has different needs and applications, you need to find out what the regular and peak loads in your network are before you set traffic limits. We recommend completing the following steps to benchmark request rates in your environment so that you can implement limits appropriate for your applications.
-
-1. In the agent configuration file, specify a global rate limit with arbitrary values based on the following conditions:
-
- - Environment where Consul servers are running
- - Number of servers and the projected load
- - Existing metrics expressing requests per second
-
-1. Set the [`limits.request_limits.mode`](/consul/docs/agent/config/config-files#mode-1) parameter in the agent configuration to `permissive`. In the following example, the configuration allows up to 1000 reads and 500 writes per second for each Consul agent:
-
- ```hcl
- request_limits {
- mode = "permissive"
- read_rate = 1000.0
- write_rate = 500.0
- }
- ```
-1. Observe the logs and metrics for your application's typical cycle, such as a 24 hour period. Refer to [Monitor traffic rate limit data](/consul/docs/agent/limits/usage/monitor-rate-limits) for additional information. Call the [`/agent/metrics`](/consul/api-docs/agent#view-metrics) HTTP API endpoint and check the data for the following metrics:
-
- - `rpc.rate_limit.exceeded` with value `global/read` for label `limit_type`
- - `rpc.rate_limit.exceeded` with value `global/write` for label `limit_type`
-
-1. If the limits are not reached, set the `mode` configuration to `enforcing`. Otherwise, continue to adjust and iterate until you find your network's unique limits.
\ No newline at end of file
diff --git a/website/content/docs/agent/limits/usage/limit-request-rates-from-ips.mdx b/website/content/docs/agent/limits/usage/limit-request-rates-from-ips.mdx
deleted file mode 100644
index 530ad7b26a7b..000000000000
--- a/website/content/docs/agent/limits/usage/limit-request-rates-from-ips.mdx
+++ /dev/null
@@ -1,72 +0,0 @@
----
-layout: docs
-page_title: Limit traffic rates for a source IP address
-description: Learn how to set read and request rate limits on RPC and gRPC traffic from all source IP addresses to a Consul resource.
----
-
-# Limit traffic rates from source IP addresses
-
-This topic describes how to configure RPC and gRPC traffic rate limits for source IP addresses. This enables you to specify a budget for read and write requests to prevent any single source IP from overwhelming the Consul server and negatively affecting the network. For information about setting global traffic rate limits, refer to [Set a global limit on traffic rates](/consul/docs/agent/limits/usage/set-global-traffic-rate-limits). For an overview of Consul's server rate limiting capabilities, refer to [Limit traffic rates overview](/consul/docs/agent/limits).
-
-
-
-This feature requires Consul Enterprise. Refer to the [feature compatibility matrix](/consul/docs/enterprise#consul-enterprise-feature-availability) for additional information.
-
-
-
-## Overview
-
-You can set limits on the rate of read and write requests from source IP addresses to specific resources, which mitigates the risks to Consul servers when Consul clients send excessive requests to a specific resource type. Before configuring traffic rate limits, you should complete the initialization process to understand normal traffic loads in your network. Refer to [Initialize rate limit settings](/consul/docs/agent/limits/usage/init-rate-limits) for additional information.
-
-Complete the following steps to configure traffic rate limits from a source IP address:
-
-1. Define rate limits in a control plane request limit configuration entry. You can set limits for different types of resource calls.
-
-1. Apply the configuration entry to enact the limits.
-
-You should also monitor read and write rate activity and make any necessary adjustments. Refer to [Monitor rate limit data](/consul/docs/agent/limits/usage/monitor-rate-limits) for additional information.
-
-## Define rate limits
-
-Create a control plane request limit configuration entry in the `default` partition. The configuration entry applies to all client requests targeting any partition. Refer to the [control plane request limit configuration entry](/consul/docs/connect/config-entries/control-plane-request-limit) reference documentation for details about the available configuration parameters.
-
-Specify the following parameters:
-
-- `kind`: This must be set to `control-plane-request-limit`.
-- `name`: Specify the name of the service that you want to limit read and write operations to.
-- `read_rate`: Specify overall number of read operations per second allowed from the service.
-- `write_rate`: Specify overall number of write operations per second allowed from the service.
-
-You can also configure limits on calls to the key-value store, ACL system, and Consul catalog.
-
-## Apply the configuration entry
-
-If your network is deployed to virtual machines, use the `consul config write` command and specify the control plane request limit configuration entry to apply the configuration. For Kubernetes-orchestrated networks, use the `kubectl apply` command.
-
-
-
-
-```shell-session
-$ consul config write control-plane-request-limit.hcl
-```
-
-
-
-
-```shell-session
-$ consul config write control-plane-request-limit.json
-```
-
-
-
-
-```shell-session
-$ kubectl apply -f control-plane-request-limit.yaml
-```
-
-
-
-
-## Disable request rate limits
-
-Set the [limits.request_limits.mode](/consul/docs/agent/config/config-files#mode-1) in the agent configuration to `disabled` to allow services to exceed the specified read and write requests limits. The `disabled` mode applies to all request rate limits, even limits specified in the [control plane request limits configuration entry](/consul/docs/connect/config-entries/control-plane-request-limit). Note that any other mode specified in the agent configuration only applies to global traffic rate limits.
diff --git a/website/content/docs/agent/limits/usage/monitor-rate-limits.mdx b/website/content/docs/agent/limits/usage/monitor-rate-limits.mdx
deleted file mode 100644
index 23502d1cb149..000000000000
--- a/website/content/docs/agent/limits/usage/monitor-rate-limits.mdx
+++ /dev/null
@@ -1,77 +0,0 @@
----
-layout: docs
-page_title: Monitor traffic rate limit data
-description: Learn about the metrics and logs you can use to monitor server rate limiting activity, including rate limits for read operations and write operations
----
-
-# Monitor traffic rate limit data
-
-This topic describes Consul functionality that enables you to monitor read and write request operations taking place in your network. Use the functionality to help you understand normal workloads and set safe limits on the number of requests Consul client agents and services can make to Consul servers.
-
-## Access rate limit logs
-
-Consul prints a log line for each rate limit request. The log provides the information necessary for identifying the source of the request and the configured limit. Consul prints the log at the `DEBUG` log level and can drop the log line to avoid affecting the server health. Dropping a log line increments the `rpc.rate_limit.log_dropped` metric.
-
-The following example log shows that an RPC request from `127.0.0.1:53562` to `KVS.Apply` exceeded the limit:
-
-```text
-2023-02-17T10:01:15.565-0500 [DEBUG] agent.server.rpc-rate-limit: RPC
-exceeded allowed rate limit: rpc=KVS.Apply source_addr=127.0.0.1:53562
-limit_type=global/write limit_enforced=false
-```
-
-Refer to [`log_file`](/consul/docs/agent/config/config-files#log_file) for information about where to retrieve log files.
-
-## Review rate limit metrics
-
-Consul captures the following metrics associated with rate limits:
-
-- Type of limit
-- Operation
-- Rate limit mode
-
-Call the `/agent/metrics` API endpoint to view the metrics associated with rate limits. Refer to [View Metrics](/consul/api-docs/agent#view-metrics) for API usage information. In the following example, Consul dropped a call to the consul service because it exceeded the limit by one call:
-
-```shell-session
-$ curl http://127.0.0.1:8500/v1/agent/metrics
-{
- . . .
- "Counters": [
- {
- "Name": "consul.rpc.rate_limit.exceeded",
- "Count": 1,
- "Sum": 1,
- "Min": 1,
- "Max": 1,
- "Mean": 1,
- "Stddev": 0,
- "Labels": {
- "service": "consul"
- }
- },
- {
- "Name": "consul.rpc.rate_limit.log_dropped",
- "Count": 1,
- "Sum": 1,
- "Min": 1,
- "Max": 1,
- "Mean": 1,
- "Stddev": 0,
- "Labels": {}
- }
- ],
- . . .
-}
-```
-
-Refer to [Telemetry](/consul/docs/agent/telemetry) for additional information.
-
-## Request denials
-
-When an HTTP request is denied for rate limiting reasons, Consul returns one of the following errors:
-
-- **429 Resource Exhausted**: Indicates that a server is not able to perform the request but that another server could potentially fulfill it. This error is most common on stale reads because any server may fulfill stale read requests. To resolve this type of error, we recommend immediately retrying the request to another server. If the request came from a Consul client agent, the agent automatically retries the request up to the limit set in the [`rpc_hold_timeout`](/consul/docs/agent/config/config-files#rpc_hold_timeout) configuration.
-
-- **503 Service Unavailable**: Indicates that server is unable to perform the request and that no other server can fulfill the request, either. This usually occurs on consistent reads or for writes. In this case we recommend retrying according to an exponential backoff schedule. If the request came from a Consul client agent, the agent automatically retries the request according to the [`rpc_hold_timeout`](/consul/docs/agent/config/config-files#rpc_hold_timeout) configuration.
-
-Refer to [Rate limit reached on the server](/consul/docs/troubleshoot/common-errors#rate-limit-reached-on-the-server) for additional information.
\ No newline at end of file
diff --git a/website/content/docs/agent/limits/usage/set-global-traffic-rate-limits.mdx b/website/content/docs/agent/limits/usage/set-global-traffic-rate-limits.mdx
deleted file mode 100644
index c0afeec9010e..000000000000
--- a/website/content/docs/agent/limits/usage/set-global-traffic-rate-limits.mdx
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: docs
-page_title: Set a global limit on traffic rates
-description: Use global rate limits to prevent excessive rates of requests to Consul servers.
----
-
-# Set a global limit on traffic rates
-
-This topic describes how to configure rate limits for RPC and gRPC traffic to the Consul server.
-
-## Introduction
-
-Rate limits apply to each Consul server separately and limit the number of read requests or write requests to the server on the RPC and internal gRPC endpoints.
-
-Because all requests coming to a Consul server eventually perform an RPC or an internal gRPC request, global rate limits apply to Consul's user interfaces, such as the HTTP API interface, the CLI, and the external gRPC endpoint for services in the service mesh.
-
-Refer to [Initialize Rate Limit Settings](/consul/docs/agent/limits/usage/init-rate-limits) for additional information about right-sizing your gRPC request configurations.
-
-## Set a global rate limit for a Consul server
-
-Configure the following settings in your Consul server configuration to limit the RPC and gRPC traffic rates.
-
-- Set the rate limiter [`mode`](/consul/docs/agent/config/config-files#mode-1)
-- Set the [`read_rate`](/consul/docs/agent/config/config-files#read_rate)
-- Set the [`write_rate`](/consul/docs/agent/config/config-files#write_rate)
-
-In the following example, the Consul server is configured to prevent more than `500` read and `200` write RPC calls:
-
-
-
-```hcl
-limits = {
- rate_limit = {
- mode = "enforcing"
- read_rate = 500
- write_rate = 200
- }
-}
-```
-
-```json
-{
- "limits" : {
- "rate_limit" : {
- "mode" : "enforcing",
- "read_rate" : 500,
- "write_rate" : 200
- }
- }
-}
-
-```
-
-
-
-## Monitor request rate traffic
-
-You should continue to monitor request traffic to ensure that request rates remain within the threshold you defined. Refer to [Monitor traffic rate limit data](/consul/docs/agent/limits/usage/monitor-rate-limits) for instructions about checking metrics and log entries, as well as troubleshooting information.
-
-## Disable request rate limits
-
-Set the [`limits.request_limits.mode`](/consul/docs/agent/config/config-files#mode-1) to `disabled` to allow services to exceed the specified read and write requests limits, even limits specified in the [control plane request limits configuration entry](/consul/docs/connect/config-entries/control-plane-request-limit). Note that any other mode specified in the agent configuration only applies to global traffic rate limits.
diff --git a/website/content/docs/agent/monitor/components.mdx b/website/content/docs/agent/monitor/components.mdx
deleted file mode 100644
index 1c3d49270e42..000000000000
--- a/website/content/docs/agent/monitor/components.mdx
+++ /dev/null
@@ -1,121 +0,0 @@
----
-layout: docs
-page_title: Monitoring Consul components
-description: >-
- Apply best practices monitoring your Consul control and data plane.
----
-
-# Monitoring Consul components
-
-This document provides recommendations for monitoring your Consul control and data plane. By keeping track of these components and setting up alerts, you can better maintain the overall health and resilience of your service mesh.
-
-## Background
-
-A Consul datacenter is the smallest unit of Consul infrastructure that can perform basic Consul operations like service discovery or service mesh. A datacenter contains at least one Consul server agent, but a real-world deployment contains three or five server agents and several Consul client agents.
-
-The Consul control plane consists of server agents that store all state information, including service and node IP addresses, health checks, and configuration. In addition, the control plane is responsible for securing the mesh, facilitating service discovery, health checking, policy enforcement, and other similar operational concerns. In addition, the control plane contains client agents that report node and service health status to the Consul cluster. In a typical deployment, you must run client agents on every compute node in your datacenter.
-
-The Consul data plane consists of proxies deployed locally alongside each service instance. These proxies, called sidecar proxies, receive mesh configuration data from the control plane, and control network communication between their local service instance and other services in the network. The sidecar proxy handles inbound and outbound service connections, and ensures TLS connections between services are both verified and encrypted.
-
-If you have Kubernetes workloads, you can also run Consul with an alternate service mesh configuration that deploys Envoy proxies but not client agents. Refer to [Simplified service mesh with Consul dataplanes](/consul/docs/connect/dataplane) for more information.
-
-## Consul control plane monitoring
-
-The Consul control plane consists of the following components:
-
-- RPC Communication between Consul servers and clients.
-- Data plane routing instructions for the Envoy Layer 7 proxy.
-- Serf Traffic: LAN and WAN
-- Consul cluster peering and server federation
-
-It is important to monitor and establish baseline and alert thresholds for Consul control plane components for abnormal behavior detection. Note that these alerts can also be triggered by some planned events like Consul cluster upgrades, configuration changes, or leadership change.
-
-To help monitor your Consul control plane, we recommend establishing a baseline and standard deviation for the following:
-
-- [Server health](/consul/docs/agent/telemetry#server-health)
-- [Leadership changes](/consul/docs/agent/telemetry#leadership-changes)
-- [Key metrics](/consul/docs/agent/telemetry#key-metrics)
-- [Autopilot](/consul/docs/agent/telemetry#autopilot)
-- [Network activity](/consul/docs/agent/telemetry#network-activity-rpc-count)
-- [Certificate authority expiration](/consul/docs/agent/telemetry#certificate-authority-expiration)
-
-It is important to have a highly performant network with low network latency. Ensure network latency for gossip in all datacenters are within the 8ms latency budget for all Consul agents. View the [Production server requirements](/consul/docs/install/performance#production-server-requirements) for more information.
-
-### Raft recommendations
-
-Consul uses [Raft for consensus protocol](/consul/docs/architecture/consensus). High saturation of the Raft goroutines can lead to elevated latency in the rest of the system and may cause the Consul cluster to be unstable. As a result, it is important to monitor Raft to track your control plane health. We recommend the following actions to keep the control plane healthy:
-- Create an alert that notifies you when [Raft thread saturation](/consul/docs/agent/telemetry#raft-thread-saturation) exceeds 50%.
-- Monitor [Raft replication capacity](/consul/docs/agent/telemetry#raft-replication-capacity-issues) when Consul is handling large amounts of data and high write throughput.
-- Lower [`raft_multiplier`](/consul/docs/install/performance#production) to keep your Consul cluster stable. The value of `raft_multiplier` defines the scaling factor for Consul. Default value for raft_multiplier is 5.
-
- A short multiplier minimizes failure detection and election time but may trigger frequently in high latency situations. This can cause constant leadership churn and associated unavailability. A high multiplier reduces the chances that spurious failures will cause leadership churn but it does this at the expense of taking longer to detect real failures and thus takes longer to restore Consul cluster availability.
-
- Wide networks with higher latency will perform better with larger `raft_multiplier` values.
-
-Raft uses BoltDB for storing data and maintaining its own state. Refer to the [Bolt DB performance metrics](/consul/docs/agent/telemetry#bolt-db-performance) when you are troubleshooting Raft performance issues.
-
-## Consul data plane monitoring
-
-The data plane of Consul consists of Consul clients or [Connect proxies](/consul/docs/connect/proxies) interacting with each other through service-to-service communication. Service-to-service traffic always stays within the data plane, while the control plane only enforces traffic rules. Monitoring service-to-service communication is important but may become extremely complex in an enterprise setup with multiple services communicating to each other across federated Consul clusters through mesh, ingress and terminating gateways.
-
-### Service monitoring
-
-You can extract the following service-related information:
-
-- Use the [`catalog`](/consul/commands/catalog) command or the Consul UI to query all registered services in a Consul datacenter.
-- Use the [`/agent/service/:service_id`](/consul/api-docs/agent/service#get-service-configuration) API endpoint to query individual services. Connect proxies use this endpoint to discover embedded configuration.
-
-### Proxy monitoring
-
-Envoy is the supported Connect proxy for Consul service mesh. For virtual machines (VMs), Envoy starts as a sidecar service process. For Kubernetes, Envoy starts as a sidecar container in a Kubernetes service pod.
-Refer to the [Supported Envoy versions](/consul/docs/connect/proxies/envoy#supported-versions) documentation to find the compatible Envoy versions for your version of Consul.
-
-For troubleshooting service mesh issues, set Consul logs to `trace` or `debug`. The following example annotation sets Envoy logging to `debug`.
-
-```yaml
-annotations:
- consul.hashicorp.com/envoy-extra-args: '--log-level debug --disable-hot-restart'
-```
-
-Refer to the [Enable logging on Envoy sidecar pods](/consul/docs/k8s/annotations-and-labels#consul-hashicorp-com-envoy-extra-args) documentation for more information.
-
-#### Envoy Admin Interface
-
-To troubleshoot service-to-service communication issues, monitor Envoy host statistics. Envoy exposes a local administration interface that can be used to query and modify different aspects of the server on port `19000` by default. Envoy also exposes a public listener port to receive mTLS connections from other proxies in the mesh on port `20000` by default.
-
-All endpoints exposed by Envoy are available at the node running Envoy on port `19000`. The node can either be a pod in Kubernetes or VM running Consul Service Mesh. For example, if you forward the Envoy port to your local machine, you can access the Envoy admin interface at `http://localhost:19000/`.
-
-The following Envoy admin interface endpoints are particularly useful:
-
-- The `listeners` endpoint lists all listeners running on `localhost`. This allows you to confirm whether the upstream services are binding correctly to Envoy.
-
-```shell-session
-$ curl http://localhost:19000/listeners
-public_listener:192.168.19.168:20000::192.168.19.168:20000
-Outbound_listener:127.0.0.1:15001::127.0.0.1:15001
-```
-
-- The `/clusters` endpoint displays information about the xDS clusters, such as service requests and mTLS related data. The following example shows a truncated output.
-
-```shell-session
-$ http://localhost:19000/clusters
-`local_app::observability_name::local_app
-local_app::default_priority::max_connections::1024
-local_app::default_priority::max_pending_requests::1024
-local_app::default_priority::max_requests::1024
-local_app::default_priority::max_retries::3
-local_app::high_priority::max_connections::1024
-local_app::high_priority::max_pending_requests::1024
-local_app::high_priority::max_requests::1024
-local_app::high_priority::max_retries::3
-local_app::added_via_api::true
-## ...
-```
-
-Visit the main admin interface (`http://localhost:19000`) to find the full list of possible Consul admin endpoints. Refer to the [Envoy docs](https://www.envoyproxy.io/docs/envoy/latest/operations/admin) for more information.
-
-## Next steps
-
-In this guide, you learned recommendations for monitoring your Consul control and data plane.
-
-To learn about monitoring the Consul host and instance resources, visit our [Monitoring best practices](/well-architected-framework/reliability/reliability-monitoring-service-to-service-communication-with-envoy) documentation.
diff --git a/website/content/docs/agent/monitor/telemetry.mdx b/website/content/docs/agent/monitor/telemetry.mdx
deleted file mode 100644
index 322ec40997d1..000000000000
--- a/website/content/docs/agent/monitor/telemetry.mdx
+++ /dev/null
@@ -1,809 +0,0 @@
----
-layout: docs
-page_title: Agents - Enable Telemetry Metrics
-description: >-
- Configure agent telemetry to collect operations metrics you can use to debug and observe Consul behavior and performance. Learn about configuration options, the metrics you can collect, and why they're important.
----
-
-# Agent Telemetry
-
-The Consul agent collects various runtime metrics about the performance of
-different libraries and subsystems. These metrics are aggregated on a ten
-second (10s) interval and are retained for one minute. An _interval_ is the period of time between instances of data being collected and aggregated.
-
-When telemetry is being streamed to an external metrics store, the interval is defined to be that store's flush interval.
-
-|External Store|Interval (seconds)|
-|:--------|:--------|
-|[dogstatsd](https://docs.datadoghq.com/developers/dogstatsd/?tab=hostagent#how-it-works)|10s|
-|[Prometheus](https://vector.dev/docs/reference/configuration/sinks/prometheus_exporter/#flush_period_secs)| 60s|
-|[statsd](https://github.com/statsd/statsd/blob/master/docs/metric_types.md#timing)|10s|
-
-To view this data, you must send a signal to the Consul process: on Unix,
-this is `USR1` while on Windows it is `BREAK`. Once Consul receives the signal,
-it will dump the current telemetry information to the agent's `stderr`.
-
-This telemetry information can be used for debugging or otherwise
-getting a better view of what Consul is doing. Review the [Monitoring and
-Metrics tutorial](/consul/tutorials/day-2-operations/monitor-datacenter-health?utm_source=docs) to learn how to collect and interpret Consul data.
-
-By default, all metric names of gauge type are prefixed with the hostname of the consul agent, e.g.,
-`consul.hostname.server.isLeader`. To disable prefixing the hostname, set
-`telemetry.disable_hostname=true` in the [agent configuration](/consul/docs/agent/config/config-files#telemetry).
-
-Additionally, if the [`telemetry` configuration options](/consul/docs/agent/config/config-files#telemetry)
-are provided, the telemetry information will be streamed to a
-[statsite](http://github.com/armon/statsite) or [statsd](http://github.com/etsy/statsd) server where
-it can be aggregated and flushed to Graphite or any other metrics store.
-For a configuration example for Telegraf, review the [Monitoring with Telegraf tutorial](/consul/tutorials/day-2-operations/monitor-health-telegraf?utm_source=docs).
-
-This
-information can also be viewed with the [metrics endpoint](/consul/api-docs/agent#view-metrics) in JSON
-format or using [Prometheus](https://prometheus.io/) format.
-
-
-
-```log
-[2014-01-29 10:56:50 -0800 PST][G] 'consul-agent.runtime.num_goroutines': 19.000
-[2014-01-29 10:56:50 -0800 PST][G] 'consul-agent.runtime.alloc_bytes': 755960.000
-[2014-01-29 10:56:50 -0800 PST][G] 'consul-agent.runtime.malloc_count': 7550.000
-[2014-01-29 10:56:50 -0800 PST][G] 'consul-agent.runtime.free_count': 4387.000
-[2014-01-29 10:56:50 -0800 PST][G] 'consul-agent.runtime.heap_objects': 3163.000
-[2014-01-29 10:56:50 -0800 PST][G] 'consul-agent.runtime.total_gc_pause_ns': 1151002.000
-[2014-01-29 10:56:50 -0800 PST][G] 'consul-agent.runtime.total_gc_runs': 4.000
-[2014-01-29 10:56:50 -0800 PST][C] 'consul-agent.agent.ipc.accept': Count: 5 Sum: 5.000
-[2014-01-29 10:56:50 -0800 PST][C] 'consul-agent.agent.ipc.command': Count: 10 Sum: 10.000
-[2014-01-29 10:56:50 -0800 PST][C] 'consul-agent.serf.events': Count: 5 Sum: 5.000
-[2014-01-29 10:56:50 -0800 PST][C] 'consul-agent.serf.events.foo': Count: 4 Sum: 4.000
-[2014-01-29 10:56:50 -0800 PST][C] 'consul-agent.serf.events.baz': Count: 1 Sum: 1.000
-[2014-01-29 10:56:50 -0800 PST][S] 'consul-agent.memberlist.gossip': Count: 50 Min: 0.007 Mean: 0.020 Max: 0.041 Stddev: 0.007 Sum: 0.989
-[2014-01-29 10:56:50 -0800 PST][S] 'consul-agent.serf.queue.Intent': Count: 10 Sum: 0.000
-[2014-01-29 10:56:50 -0800 PST][S] 'consul-agent.serf.queue.Event': Count: 10 Min: 0.000 Mean: 2.500 Max: 5.000 Stddev: 2.121 Sum: 25.000
-```
-
-
-
-# Key Metrics
-
-These are some metrics emitted that can help you understand the health of your cluster at a glance. A [Grafana dashboard](https://grafana.com/grafana/dashboards/13396) is also available, which is maintained by the Consul team and displays these metrics for easy visualization. For a full list of metrics emitted by Consul, see [Metrics Reference](#metrics-reference)
-
-### Transaction timing
-
-| Metric Name | Description | Unit | Type |
-| :----------------------- | :----------------------------------------------------------------------------------- | :--------------------------- | :------ |
-| `consul.kvs.apply` | Measures the time it takes to complete an update to the KV store. | ms | timer |
-| `consul.txn.apply` | Measures the time spent applying a transaction operation. | ms | timer |
-| `consul.raft.apply` | Counts the number of Raft transactions applied during the interval. This metric is only reported on the leader. | raft transactions / interval | counter |
-| `consul.raft.commitTime` | Measures the time it takes to commit a new entry to the Raft log on the leader. | ms | timer |
-
-**Why they're important:** Taken together, these metrics indicate how long it takes to complete write operations in various parts of the Consul cluster. Generally these should all be fairly consistent and no more than a few milliseconds. Sudden changes in any of the timing values could be due to unexpected load on the Consul servers, or due to problems on the servers themselves.
-
-**What to look for:** Deviations (in any of these metrics) of more than 50% from baseline over the previous hour.
-
-### Leadership changes
-
-| Metric Name | Description | Unit | Type |
-| :------------------------------- | :------------------------------------------------------------------------------------------------------------- | :-------- | :------ |
-| `consul.raft.leader.lastContact` | Measures the time since the leader was last able to contact the follower nodes when checking its leader lease. | ms | timer |
-| `consul.raft.state.candidate` | Increments whenever a Consul server starts an election. | elections | counter |
-| `consul.raft.state.leader` | Increments whenever a Consul server becomes a leader. | leaders | counter |
-| `consul.server.isLeader` | Track if a server is a leader(1) or not(0). | 1 or 0 | gauge |
-
-**Why they're important:** Normally, your Consul cluster should have a stable leader. If there are frequent elections or leadership changes, it would likely indicate network issues between the Consul servers, or that the Consul servers themselves are unable to keep up with the load.
-
-**What to look for:** For a healthy cluster, you're looking for a `lastContact` lower than 200ms, `leader` > 0 and `candidate` == 0. Deviations from this might indicate flapping leadership.
-
-### Certificate Authority Expiration
-
-| Metric Name | Description | Unit | Type |
-| :------------------------- | :---------------------------------------------------------------------------------- | :------ | :---- |
-| `consul.mesh.active-root-ca.expiry` | The number of seconds until the root CA expires, updated every hour. | seconds | gauge |
-| `consul.mesh.active-signing-ca.expiry` | The number of seconds until the signing CA expires, updated every hour. | seconds | gauge |
-| `consul.agent.tls.cert.expiry` | The number of seconds until the server agent's TLS certificate expires, updated every hour. | seconds | gauge |
-
-**Why they're important:** Consul Mesh requires a CA to sign all certificates
-used to connect the mesh and the mesh network ceases to work if they expire and
-become invalid. The Root is particularly important to monitor as Consul does
-not automatically rotate it. The TLS certificate metric monitors the certificate
-that the server's agent uses to connect with the other agents in the cluster.
-
-**What to look for:** The Root CA should be monitored for an approaching
-expiration, to indicate it is time for you to rotate the "root" CA either
-manually or with external automation. Consul should rotate the signing (intermediate) certificate
-automatically, but we recommend monitoring the rotation. When the certificate does not rotate, check the server agent logs for
-messages related to the CA system. The agent TLS certificate's rotation handling
-varies based on the configuration.
-
-### Autopilot
-
-| Metric Name | Description | Unit | Type |
-| :------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :----------- | :---- |
-| `consul.autopilot.healthy` | Tracks the overall health of the local server cluster. If all servers are considered healthy by Autopilot, this will be set to 1. If any are unhealthy, this will be 0. | health state | gauge |
-
-**Why it's important:** Autopilot can expose the overall health of your cluster with a simple boolean.
-
-**What to look for:** Alert if `healthy` is 0. Some other indicators of an unhealthy cluster would be:
-- `consul.raft.commitTime` - This can help reflect the speed of state store
-changes being performed by the agent. If this number is rising, the server may
-be experiencing an issue due to degraded resources on the host.
-- [Leadership change metrics](#leadership-changes) - Check for deviation from
-the recommended values. This can indicate failed leadership elections or
-flapping nodes.
-
-### Memory usage
-
-| Metric Name | Description | Unit | Type |
-| :--------------------------- | :----------------------------------------------------------------- | :---- | :---- |
-| `consul.runtime.alloc_bytes` | Measures the number of bytes allocated by the Consul process. | bytes | gauge |
-| `consul.runtime.sys_bytes` | Measures the total number of bytes of memory obtained from the OS. | bytes | gauge |
-
-**Why they're important:** Consul keeps all of its data in memory. If Consul consumes all available memory, it will crash.
-
-**What to look for:** If `consul.runtime.sys_bytes` exceeds 90% of total available system memory.
-
-**NOTE:** This metric is calculated using Go's runtime package
-[MemStats](https://golang.org/pkg/runtime/#MemStats). This will have a
-different output than using information gathered from `top`. For more
-information, see [GH-4734](https://github.com/hashicorp/consul/issues/4734).
-
-### Garbage collection
-
-| Metric Name | Description | Unit | Type |
-| :--------------------------------- | :---------------------------------------------------------------------------------------------------- | :--- | :---- |
-| `consul.runtime.total_gc_pause_ns` | Number of nanoseconds consumed by stop-the-world garbage collection (GC) pauses since Consul started. | ns | gauge |
-
-**Why it's important:** GC pause is a "stop-the-world" event, meaning that all runtime threads are blocked until GC completes. Normally these pauses last only a few nanoseconds. But if memory usage is high, the Go runtime may GC so frequently that it starts to slow down Consul.
-
-**What to look for:** Warning if `total_gc_pause_ns` exceeds 2 seconds/minute, critical if it exceeds 5 seconds/minute.
-
-**NOTE:** `total_gc_pause_ns` is a cumulative counter, so in order to calculate rates (such as GC/minute),
-you will need to apply a function such as InfluxDB's [`non_negative_difference()`](https://docs.influxdata.com/influxdb/v1.5/query_language/functions/#non-negative-difference).
-
-### Network activity - RPC Count
-
-| Metric Name | Description | Unit | Type |
-| :--------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------- | :------ |
-| `consul.client.rpc` | Increments whenever a Consul agent makes an RPC request to a Consul server | requests | counter |
-| `consul.client.rpc.exceeded` | Increments whenever a Consul agent makes an RPC request to a Consul server gets rate limited by that agent's [`limits`](/consul/docs/agent/config/config-files#limits) configuration. | requests | counter |
-| `consul.client.rpc.failed` | Increments whenever a Consul agent makes an RPC request to a Consul server and fails. | requests | counter |
-
-**Why they're important:** These measurements indicate the current load created from a Consul agent, including when the load becomes high enough to be rate limited. A high RPC count, especially from `consul.client.rpc.exceeded` meaning that the requests are being rate-limited, could imply a misconfigured Consul agent.
-
-**What to look for:**
-Sudden large changes to the `consul.client.rpc` metrics (greater than 50% deviation from baseline).
-`consul.client.rpc.exceeded` or `consul.client.rpc.failed` count > 0, as it implies that an agent is being rate-limited or fails to make an RPC request to a Consul server
-
-### Raft Thread Saturation
-
-| Metric Name | Description | Unit | Type |
-| :----------------------------------- | :----------------------------------------------------------------------------------------------------------------------- | :--------- | :----- |
-| `consul.raft.thread.main.saturation` | An approximate measurement of the proportion of time the main Raft goroutine is busy and unavailable to accept new work. | percentage | sample |
-| `consul.raft.thread.fsm.saturation` | An approximate measurement of the proportion of time the Raft FSM goroutine is busy and unavailable to accept new work. | percentage | sample |
-
-**Why they're important:** These measurements are a useful proxy for how much
-capacity a Consul server has to accept additional write load. High saturation
-of the Raft goroutines can lead to elevated latency in the rest of the system
-and cause cluster instability.
-
-**What to look for:** Generally, a server's steady-state saturation should be
-less than 50%.
-
-**NOTE:** These metrics are approximate and under extremely heavy load won't
-give a perfect fine-grained view of how much headroom a server has available.
-Instead, treat them as an early warning sign.
-
-**Requirements:**
-* Consul 1.13.0+
-
-### Raft Replication Capacity Issues
-
-| Metric Name | Description | Unit | Type |
-| :--------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------- | :------ |
-| `consul.raft.fsm.lastRestoreDuration` | Measures the time taken to restore the FSM from a snapshot on an agent restart or from the leader calling installSnapshot. This is a gauge that holds its value since most servers only restore during restarts which are typically infrequent. | ms | gauge |
-| `consul.raft.leader.oldestLogAge` | The number of milliseconds since the _oldest_ log in the leader's log store was written. This can be important for replication health where write rate is high and the snapshot is large as followers may be unable to recover from a restart if restoring takes longer than the minimum value for the current leader. Compare this with `consul.raft.fsm.lastRestoreDuration` and `consul.raft.rpc.installSnapshot` to monitor. In normal usage this gauge value will grow linearly over time until a snapshot completes on the leader and the log is truncated. | ms | gauge |
-| `consul.raft.rpc.installSnapshot` | Measures the time taken to process the installSnapshot RPC call. This metric should only be seen on agents which are currently in the follower state. | ms | timer |
-
-**Why they're important:** These metrics allow operators to monitor the health
-and capacity of raft replication on servers. **When Consul is handling large
-amounts of data and high write throughput** it is possible for the cluster to
-get into the following state:
- * Write throughput is high (say 500 commits per second or more) and constant
- * The leader is writing out a large snapshot every minute or so
- * The snapshot is large enough that it takes considerable time to restore from
- disk on a restart or from the leader if a follower gets behind
- * Disk IO available allows the leader to write a snapshot faster than it can be
- restored from disk on a follower
-
-Under these conditions, a follower after a restart may be unable to catch up on
-replication and become a voter again since it takes longer to restore from disk
-or the leader than the leader takes to write a new snapshot and truncate its
-logs. Servers retain
-[`raft_trailing_logs`](/consul/docs/agent/config/config-files#raft_trailing_logs) (default
-`10240`) log entries even if their snapshot was more recent. On a leader
-processing 500 commits/second, that is only about 20 seconds worth of logs.
-Assuming the leader is able to write out a snapshot and truncate the logs in
-less than 20 seconds, there will only be 20 seconds worth of "recent" logs
-available on the leader right after the leader has taken a snapshot and never
-more than about 80 seconds worth assuming it is taking a snapshot and truncating
-logs every 60 seconds.
-
-In this state, followers must be able to restore a snapshot into memory and
-resume replication in under 80 seconds otherwise they will never be able to
-rejoin the cluster until write rates reduce. If they take more than 20 seconds
-then there will be a chance that they are unlucky with timing when they restart
-and have to download a snapshot again from the servers one or more times. If
-they take 50 seconds or more then they will likely fail to catch up more often
-than they succeed and will remain non-voters for some time until they happen to
-complete the restore just before the leader truncates its logs.
-
-In the worst case, the follower will be left continually downloading snapshots
-from the leader which are always too old to use by the time they are restored.
-This can put additional strain on the leader transferring large snapshots
-repeatedly as well as reduce the fault tolerance and serving capacity of the
-cluster.
-
-Since Consul 1.5.3
-[`raft_trailing_logs`](/consul/docs/agent/config/config-files#raft_trailing_logs) has been
-configurable. Increasing it allows the leader to retain more logs and give
-followers more time to restore and catch up. The tradeoff is potentially
-slower appends which eventually might affect write throughput and latency
-negatively so setting it arbitrarily high is not recommended. Before Consul
-1.10.0 it required a rolling restart to change this configuration on the leader
-though and since no followers could restart without losing health this could
-mean losing cluster availability and needing to recover the cluster from a loss
-of quorum.
-
-Since Consul 1.10.0
-[`raft_trailing_logs`](/consul/docs/agent/config/config-files#raft_trailing_logs) is now
-reloadable with `consul reload` or `SIGHUP` allowing operators to increase this
-without the leader restarting or losing leadership allowing the cluster to be
-recovered gracefully.
-
-Monitoring these metrics can help avoid or diagnose this state.
-
-**What to look for:**
-
-`consul.raft.leader.oldestLogAge` should look like a saw-tooth wave increasing
-linearly with time until the leader takes a snapshot and then jumping down as
-the oldest logs are truncated. The lowest point on that line should remain
-comfortably higher (i.e. 2x or more) than the time it takes to restore a
-snapshot.
-
-There are two ways a snapshot can be restored on a follower: from disk on
-startup or from the leader during an `installSnapshot` RPC. The leader only
-sends an `installSnapshot` RPC if the follower is new and has no state, or if
-its state is too old for it to catch up with the leader's logs.
-
-`consul.raft.fsm.lastRestoreDuration` shows the time it took to restore from
-either source the last time it happened. Most of the time this is when the
-server was started. It's a gauge that will always show the last restore duration
-(in Consul 1.10.0 and later) however long ago that was.
-
-`consul.raft.rpc.installSnapshot` is the timing information from the leader's
-perspective when it installs a new snapshot on a follower. It includes the time
-spent transferring the data as well as the follower restoring it. Since these
-events are typically infrequent, you may need to graph the last value observed,
-for example using `max_over_time` with a large range in Prometheus. While the
-restore part will also be reflected in `lastRestoreDuration`, it can be useful
-to observe this too since the logs need to be able to cover this entire
-operation including the snapshot delivery to ensure followers can always catch
-up safely.
-
-Graphing `consul.raft.leader.oldestLogAge` on the same axes as the other two
-metrics here can help see at a glance if restore times are creeping dangerously
-close to the limit of what the leader is retaining at the current write rate.
-
-Note that if servers don't restart often, then the snapshot could have grown
-significantly since the last restore happened so last restore times might not
-reflect what would happen if an agent restarts now.
-
-### License Expiration
-
-| Metric Name | Description | Unit | Type |
-| :-------------------------------- | :--------------------------------------------------------------- | :---- | :---- |
-| `consul.system.licenseExpiration` | Number of hours until the Consul Enterprise license will expire. | hours | gauge |
-
-**Why they're important:**
-
-This measurement indicates how many hours are left before the Consul Enterprise license expires. When the license expires some
-Consul Enterprise features will cease to work. An example of this is that after expiration, it is no longer possible to create
-or modify resources in non-default namespaces or to manage namespace definitions themselves even though reads of namespaced
-resources will still work.
-
-**What to look for:**
-
-This metric should be monitored to ensure that the license doesn't expire to prevent degradation of functionality.
-
-
-### Bolt DB Performance
-
-| Metric Name | Description | Unit | Type |
-| :-------------------------------- | :--------------------------------------------------------------- | :---- | :---- |
-| `consul.raft.boltdb.freelistBytes` | Represents the number of bytes necessary to encode the freelist metadata. When [`raft_logstore.boltdb.no_freelist_sync`](/consul/docs/agent/config/config-files#raft_logstore_boltdb_no_freelist_sync) is set to `false` these metadata bytes must also be written to disk for each committed log. | bytes | gauge |
-| `consul.raft.boltdb.logsPerBatch` | Measures the number of logs being written per batch to the db. | logs | sample |
-| `consul.raft.boltdb.storeLogs` | Measures the amount of time spent writing logs to the db. | ms | timer |
-| `consul.raft.boltdb.writeCapacity` | Theoretical write capacity in terms of the number of logs that can be written per second. Each sample outputs what the capacity would be if future batched log write operations were similar to this one. This similarity encompasses 4 things: batch size, byte size, disk performance and boltdb performance. While none of these will be static and its highly likely individual samples of this metric will vary, aggregating this metric over a larger time window should provide a decent picture into how this BoltDB store can perform | logs/second | sample |
-
-
-**Requirements:**
-* Consul 1.11.0+
-
-**Why they're important:**
-
-The `consul.raft.boltdb.storeLogs` metric is a direct indicator of disk write performance of a Consul server. If there are issues with the disk or
-performance degradations related to Bolt DB, these metrics will show the issue and potentially the cause as well.
-
-**What to look for:**
-
-The primary thing to look for are increases in the `consul.raft.boltdb.storeLogs` times. Its value will directly govern an
-upper limit to the throughput of write operations within Consul.
-
-In Consul each write operation will turn into a single Raft log to be committed. Raft will process these
-logs and store them within Bolt DB in batches. Each call to store logs within Bolt DB is measured to record how long
-it took as well as how many logs were contained in the batch. Writing logs in this fashion is serialized so that
-a subsequent log storage operation can only be started after the previous one completed. The maximum number
-of log storage operations that can be performed each second is represented with the `consul.raft.boltdb.writeCapacity`
-metric. When log storage operations are becoming slower you may not see an immediate decrease in write capacity
-due to increased batch sizes of each operation. However, the max batch size allowed is 64 logs. Therefore if
-the `logsPerBatch` metric is near 64 and the `storeLogs` metric is seeing increased time to write each batch to disk,
-then it is likely that increased write latencies and other errors may occur.
-
-There can be a number of potential issues that can cause this. Often times it could be performance of the underlying
-disks that is the issue. Other times it may be caused by Bolt DB behavior. Bolt DB keeps track of free space within
-the `raft.db` file. When needing to allocate data it will use existing free space first before further expanding the
-file. By default, Bolt DB will write a data structure containing metadata about free pages within the DB to disk for
-every log storage operation. Therefore if the free space within the database grows excessively large, such as after
-a large spike in writes beyond the normal steady state and a subsequent slow down in the write rate, then Bolt DB
-could end up writing a large amount of extra data to disk for each log storage operation. This has the potential
-to drastically increase disk write throughput, potentially beyond what the underlying disks can keep up with. To
-detect this situation you can look at the `consul.raft.boltdb.freelistBytes` metric. This metric is a count of
-the extra bytes that are being written for each log storage operation beyond the log data itself. While not a clear
-indicator of an actual issue, this metric can be used to diagnose why the `consul.raft.boltdb.storeLogs` metric
-is high.
-
-If Bolt DB log storage performance becomes an issue and is caused by free list management then setting
-[`raft_logstore.boltdb.no_freelist_sync`](/consul/docs/agent/config/config-files#raft_logstore_boltdb_no_freelist_sync) to `true` in the server's configuration
-may help to reduce disk IO and log storage operation times. Disabling free list syncing will however increase
-the startup time for a server as it must scan the raft.db file for free space instead of loading the already
-populated free list structure.
-
-Consul includes an experimental backend configuration that you can use instead of BoltDB. Refer to [Experimental WAL LogStore backend](/consul/docs/agent/wal-logstore) for more information.
-
-## Metrics Reference
-
-This is a full list of metrics emitted by Consul.
-
-| Metric | Description | Unit | Type |
-|--------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------|---------|
-| `consul.acl.blocked.{check,service}.deregistration` | Increments whenever a deregistration fails because an entity (check or service) is blocked by an ACL. | requests | counter |
-| `consul.acl.blocked.{check,node,service}.registration` | Increments whenever a registration fails because an entity (check, node or service) is blocked by an ACL. | requests | counter |
-| `consul.api.http` | This samples how long it takes to service the given HTTP request for the given verb and path. Includes labels for `path` and `method`. `path` does not include details like service or key names, for these an underscore will be present as a placeholder (eg. path=`v1.kv._`) | ms | timer |
-| `consul.client.rpc` | Increments whenever a Consul agent makes an RPC request to a Consul server. This gives a measure of how much a given agent is loading the Consul servers. Currently, this is only generated by agents in client mode, not Consul servers. | requests | counter |
-| `consul.client.rpc.exceeded` | Increments whenever a Consul agent makes an RPC request to a Consul server and gets rate limited by that agent's [`limits`](/consul/docs/agent/config/config-files#limits) configuration. This gives an indication that there's an abusive application making too many requests on the agent, or that the rate limit needs to be increased. Currently, this only applies to agents in client mode, not Consul servers. | rejected requests | counter |
-| `consul.client.rpc.failed` | Increments whenever a Consul agent makes an RPC request to a Consul server and fails. | requests | counter |
-| `consul.client.api.catalog_register` | Increments whenever a Consul agent receives a catalog register request. | requests | counter |
-| `consul.client.api.success.catalog_register` | Increments whenever a Consul agent successfully responds to a catalog register request. | requests | counter |
-| `consul.client.rpc.error.catalog_register` | Increments whenever a Consul agent receives an RPC error for a catalog register request. | errors | counter |
-| `consul.client.api.catalog_deregister` | Increments whenever a Consul agent receives a catalog deregister request. | requests | counter |
-| `consul.client.api.success.catalog_deregister` | Increments whenever a Consul agent successfully responds to a catalog deregister request. | requests | counter |
-| `consul.client.rpc.error.catalog_deregister` | Increments whenever a Consul agent receives an RPC error for a catalog deregister request. | errors | counter |
-| `consul.client.api.catalog_datacenters` | Increments whenever a Consul agent receives a request to list datacenters in the catalog. | requests | counter |
-| `consul.client.api.success.catalog_datacenters` | Increments whenever a Consul agent successfully responds to a request to list datacenters. | requests | counter |
-| `consul.client.rpc.error.catalog_datacenters` | Increments whenever a Consul agent receives an RPC error for a request to list datacenters. | errors | counter |
-| `consul.client.api.catalog_nodes` | Increments whenever a Consul agent receives a request to list nodes from the catalog. | requests | counter |
-| `consul.client.api.success.catalog_nodes` | Increments whenever a Consul agent successfully responds to a request to list nodes. | requests | counter |
-| `consul.client.rpc.error.catalog_nodes` | Increments whenever a Consul agent receives an RPC error for a request to list nodes. | errors | counter |
-| `consul.client.api.catalog_services` | Increments whenever a Consul agent receives a request to list services from the catalog. | requests | counter |
-| `consul.client.api.success.catalog_services` | Increments whenever a Consul agent successfully responds to a request to list services. | requests | counter |
-| `consul.client.rpc.error.catalog_services` | Increments whenever a Consul agent receives an RPC error for a request to list services. | errors | counter |
-| `consul.client.api.catalog_service_nodes` | Increments whenever a Consul agent receives a request to list nodes offering a service. | requests | counter |
-| `consul.client.api.success.catalog_service_nodes` | Increments whenever a Consul agent successfully responds to a request to list nodes offering a service. | requests | counter |
-| `consul.client.api.error.catalog_service_nodes` | Increments whenever a Consul agent receives an RPC error for request to list nodes offering a service. | requests | counter |
-| `consul.client.rpc.error.catalog_service_nodes` | Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service. | errors | counter |
-| `consul.client.api.catalog_node_services` | Increments whenever a Consul agent receives a request to list services registered in a node. | requests | counter |
-| `consul.client.api.success.catalog_node_services` | Increments whenever a Consul agent successfully responds to a request to list services in a node. | requests | counter |
-| `consul.client.rpc.error.catalog_node_services` | Increments whenever a Consul agent receives an RPC error for a request to list services in a node. | errors | counter |
-| `consul.client.api.catalog_node_service_list` | Increments whenever a Consul agent receives a request to list a node's registered services. | requests | counter |
-| `consul.client.rpc.error.catalog_node_service_list` | Increments whenever a Consul agent receives an RPC error for request to list a node's registered services. | errors | counter |
-| `consul.client.api.success.catalog_node_service_list` | Increments whenever a Consul agent successfully responds to a request to list a node's registered services. | requests | counter |
-| `consul.client.api.catalog_gateway_services` | Increments whenever a Consul agent receives a request to list services associated with a gateway. | requests | counter |
-| `consul.client.api.success.catalog_gateway_services` | Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway. | requests | counter |
-| `consul.client.rpc.error.catalog_gateway_services` | Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway. | errors | counter |
-| `consul.runtime.num_goroutines` | Tracks the number of running goroutines and is a general load pressure indicator. This may burst from time to time but should return to a steady state value. | number of goroutines | gauge |
-| `consul.runtime.alloc_bytes` | Measures the number of bytes allocated by the Consul process. This may burst from time to time but should return to a steady state value. | bytes | gauge |
-| `consul.runtime.heap_objects` | Measures the number of objects allocated on the heap and is a general memory pressure indicator. This may burst from time to time but should return to a steady state value. | number of objects | gauge |
-| `consul.state.nodes` | Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge |
-| `consul.state.peerings` | Measures the current number of peerings registered with Consul. It is only emitted by Consul servers. Added in v1.13.0. | number of objects | gauge |
-| `consul.state.services` | Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge |
-| `consul.state.service_instances` | Measures the current number of unique service instances registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge |
-| `consul.state.kv_entries` | Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3. | number of objects | gauge |
-| `consul.state.connect_instances` | Measures the current number of unique mesh service instances registered with Consul labeled by Kind (e.g. connect-proxy, connect-native, etc). Added in v1.10.4 | number of objects | gauge |
-| `consul.state.config_entries` | Measures the current number of configuration entries registered with Consul labeled by Kind (e.g. service-defaults, proxy-defaults, etc). See [Configuration Entries](/consul/docs/connect/config-entries) for more information. Added in v1.10.4 | number of objects | gauge |
-| `consul.members.clients` | Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. | number of clients | gauge |
-| `consul.members.servers` | Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. | number of servers | gauge |
-| `consul.dns.stale_queries` | Increments when an agent serves a query within the allowed stale threshold. | queries | counter |
-| `consul.dns.ptr_query` | Measures the time spent handling a reverse DNS query for the given node. | ms | timer |
-| `consul.dns.domain_query` | Measures the time spent handling a domain query for the given node. | ms | timer |
-| `consul.system.licenseExpiration` | This measures the number of hours remaining on the agents license. | hours | gauge |
-| `consul.version` | Represents the Consul version. | agents | gauge |
-
-## Server Health
-
-These metrics are used to monitor the health of the Consul servers.
-
-| Metric | Description | Unit | Type |
-|-----------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------|---------|
-| `consul.acl.ResolveToken` | Measures the time it takes to resolve an ACL token. | ms | timer |
-| `consul.acl.ResolveTokenToIdentity` | Measures the time it takes to resolve an ACL token to an Identity. This metric was removed in Consul 1.12. The time will now be reflected in `consul.acl.ResolveToken`. | ms | timer |
-| `consul.acl.token.cache_hit` | Increments if Consul is able to resolve a token's identity from the cache. | cache read op | counter |
-| `consul.acl.token.cache_miss` | Increments if Consul cannot resolve a token's identity from the cache. | cache read op | counter |
-| `consul.cache.bypass` | Counts how many times a request bypassed the cache because no cache-key was provided. | counter | counter |
-| `consul.cache.fetch_success` | Counts the number of successful fetches by the cache. | counter | counter |
-| `consul.cache.fetch_error` | Counts the number of failed fetches by the cache. | counter | counter |
-| `consul.cache.evict_expired` | Counts the number of expired entries that are evicted. | counter | counter |
-| `consul.raft.applied_index` | Represents the raft applied index. | index | gauge |
-| `consul.raft.apply` | Counts the number of Raft transactions occurring over the interval, which is a general indicator of the write load on the Consul servers. | raft transactions / interval | counter |
-| `consul.raft.barrier` | Counts the number of times the agent has started the barrier i.e the number of times it has issued a blocking call, to ensure that the agent has all the pending operations that were queued, to be applied to the agent's FSM. | blocks / interval | counter |
-| `consul.raft.boltdb.freelistBytes` | Represents the number of bytes necessary to encode the freelist metadata. When [`raft_logstore.boltdb.no_freelist_sync`](/consul/docs/agent/config/config-files#raft_logstore_boltdb_no_freelist_sync) is set to `false` these metadata bytes must also be written to disk for each committed log. | bytes | gauge |
-| `consul.raft.boltdb.freePageBytes` | Represents the number of bytes of free space within the raft.db file. | bytes | gauge |
-| `consul.raft.boltdb.getLog` | Measures the amount of time spent reading logs from the db. | ms | timer |
-| `consul.raft.boltdb.logBatchSize` | Measures the total size in bytes of logs being written to the db in a single batch. | bytes | sample |
-| `consul.raft.boltdb.logsPerBatch` | Measures the number of logs being written per batch to the db. | logs | sample |
-| `consul.raft.boltdb.logSize` | Measures the size of logs being written to the db. | bytes | sample |
-| `consul.raft.boltdb.numFreePages` | Represents the number of free pages within the raft.db file. | pages | gauge |
-| `consul.raft.boltdb.numPendingPages` | Represents the number of pending pages within the raft.db that will soon become free. | pages | gauge |
-| `consul.raft.boltdb.openReadTxn` | Represents the number of open read transactions against the db | transactions | gauge |
-| `consul.raft.boltdb.totalReadTxn` | Represents the total number of started read transactions against the db | transactions | gauge |
-| `consul.raft.boltdb.storeLogs` | Measures the amount of time spent writing logs to the db. | ms | timer |
-| `consul.raft.boltdb.txstats.cursorCount` | Counts the number of cursors created since Consul was started. | cursors | counter |
-| `consul.raft.boltdb.txstats.nodeCount` | Counts the number of node allocations within the db since Consul was started. | allocations | counter |
-| `consul.raft.boltdb.txstats.nodeDeref` | Counts the number of node dereferences in the db since Consul was started. | dereferences | counter |
-| `consul.raft.boltdb.txstats.pageAlloc` | Represents the number of bytes allocated within the db since Consul was started. Note that this does not take into account space having been freed and reused. In that case, the value of this metric will still increase. | bytes | gauge |
-| `consul.raft.boltdb.txstats.pageCount` | Represents the number of pages allocated since Consul was started. Note that this does not take into account space having been freed and reused. In that case, the value of this metric will still increase. | pages | gauge |
-| `consul.raft.boltdb.txstats.rebalance` | Counts the number of node rebalances performed in the db since Consul was started. | rebalances | counter |
-| `consul.raft.boltdb.txstats.rebalanceTime` | Measures the time spent rebalancing nodes in the db. | ms | timer |
-| `consul.raft.boltdb.txstats.spill` | Counts the number of nodes spilled in the db since Consul was started. | spills | counter |
-| `consul.raft.boltdb.txstats.spillTime` | Measures the time spent spilling nodes in the db. | ms | timer |
-| `consul.raft.boltdb.txstats.split` | Counts the number of nodes split in the db since Consul was started. | splits | counter |
-| `consul.raft.boltdb.txstats.write` | Counts the number of writes to the db since Consul was started. | writes | counter |
-| `consul.raft.boltdb.txstats.writeTime` | Measures the amount of time spent performing writes to the db. | ms | timer |
-| `consul.raft.boltdb.writeCapacity` | Theoretical write capacity in terms of the number of logs that can be written per second. Each sample outputs what the capacity would be if future batched log write operations were similar to this one. This similarity encompasses 4 things: batch size, byte size, disk performance and boltdb performance. While none of these will be static and its highly likely individual samples of this metric will vary, aggregating this metric over a larger time window should provide a decent picture into how this BoltDB store can perform | logs/second | sample |
-| `consul.raft.commitNumLogs` | Measures the count of logs processed for application to the FSM in a single batch. | logs | gauge |
-| `consul.raft.commitTime` | Measures the time it takes to commit a new entry to the Raft log on the leader. | ms | timer |
-| `consul.raft.fsm.lastRestoreDuration` | Measures the time taken to restore the FSM from a snapshot on an agent restart or from the leader calling installSnapshot. This is a gauge that holds its value since most servers only restore during restarts which are typically infrequent. | ms | gauge |
-| `consul.raft.fsm.snapshot` | Measures the time taken by the FSM to record the current state for the snapshot. | ms | timer |
-| `consul.raft.fsm.apply` | Measures the time to apply a log to the FSM. | ms | timer |
-| `consul.raft.fsm.enqueue` | Measures the amount of time to enqueue a batch of logs for the FSM to apply. | ms | timer |
-| `consul.raft.fsm.restore` | Measures the time taken by the FSM to restore its state from a snapshot. | ms | timer |
-| `consul.raft.last_index` | Represents the raft applied index. | index | gauge |
-| `consul.raft.leader.dispatchLog` | Measures the time it takes for the leader to write log entries to disk. | ms | timer |
-| `consul.raft.leader.dispatchNumLogs` | Measures the number of logs committed to disk in a batch. | logs | gauge |
-| `consul.raft.logstore.verifier.checkpoints_written` | Counts the number of checkpoint entries written to the LogStore. | checkpoints | counter |
-| `consul.raft.logstore.verifier.dropped_reports` | Counts how many times the verifier routine was still busy when the next checksum came in and so verification for a range was skipped. If you see this happen, consider increasing the interval between checkpoints with [`raft_logstore.verification.interval`](/consul/docs/agent/config/config-files#raft_logstore_verification) | reports dropped | counter |
-| `consul.raft.logstore.verifier.ranges_verified` | Counts the number of log ranges for which a verification report has been completed. Refer to [Monitor Raft metrics and logs for WAL](/consul/docs/agent/wal-logstore/monitoring) for more information. | log ranges verifications | counter |
-| `consul.raft.logstore.verifier.read_checksum_failures` | Counts the number of times a range of logs between two check points contained at least one disk corruption. Refer to [Monitor Raft metrics and logs for WAL](/consul/docs/agent/wal-logstore/monitoring) for more information. | disk corruptions | counter |
-| `consul.raft.logstore.verifier.write_checksum_failures` | Counts the number of times a follower has a different checksum to the leader at the point where it writes to the log. This could be caused by either a disk-corruption on the leader (unlikely) or some other corruption of the log entries in-flight. | in-flight corruptions | counter |
-| `consul.raft.leader.lastContact` | Measures the time since the leader was last able to contact the follower nodes when checking its leader lease. It can be used as a measure for how stable the Raft timing is and how close the leader is to timing out its lease. The lease timeout is 500 ms times the [`raft_multiplier` configuration](/consul/docs/agent/config/config-files#raft_multiplier), so this telemetry value should not be getting close to that configured value, otherwise the Raft timing is marginal and might need to be tuned, or more powerful servers might be needed. See the [Server Performance](/consul/docs/install/performance) guide for more details. | ms | timer |
-| `consul.raft.leader.oldestLogAge` | The number of milliseconds since the _oldest_ log in the leader's log store was written. This can be important for replication health where write rate is high and the snapshot is large as followers may be unable to recover from a restart if restoring takes longer than the minimum value for the current leader. Compare this with `consul.raft.fsm.lastRestoreDuration` and `consul.raft.rpc.installSnapshot` to monitor. In normal usage this gauge value will grow linearly over time until a snapshot completes on the leader and the log is truncated. Note: this metric won't be emitted until the leader writes a snapshot. After an upgrade to Consul 1.10.0 it won't be emitted until the oldest log was written after the upgrade. | ms | gauge |
-| `consul.raft.replication.heartbeat` | Measures the time taken to invoke appendEntries on a peer, so that it doesn't timeout on a periodic basis. | ms | timer |
-| `consul.raft.replication.appendEntries` | Measures the time it takes to replicate log entries to followers. This is a general indicator of the load pressure on the Consul servers, as well as the performance of the communication between the servers. | ms | timer |
-| `consul.raft.replication.appendEntries.rpc` | Measures the time taken by the append entries RPC to replicate the log entries of a leader agent onto its follower agent(s). | ms | timer |
-| `consul.raft.replication.appendEntries.logs` | Counts the number of logs replicated to an agent to bring it up to speed with the leader's logs. | logs appended/ interval | counter |
-| `consul.raft.restore` | Counts the number of times the restore operation has been performed by the agent. Here, restore refers to the action of raft consuming an external snapshot to restore its state. | operation invoked / interval | counter |
-| `consul.raft.restoreUserSnapshot` | Measures the time taken by the agent to restore the FSM state from a user's snapshot | ms | timer |
-| `consul.raft.rpc.appendEntries` | Measures the time taken to process an append entries RPC call from an agent. | ms | timer |
-| `consul.raft.rpc.appendEntries.storeLogs` | Measures the time taken to add any outstanding logs for an agent, since the last appendEntries was invoked | ms | timer |
-| `consul.raft.rpc.appendEntries.processLogs` | Measures the time taken to process the outstanding log entries of an agent. | ms | timer |
-| `consul.raft.rpc.installSnapshot` | Measures the time taken to process the installSnapshot RPC call. This metric should only be seen on agents which are currently in the follower state. | ms | timer |
-| `consul.raft.rpc.processHeartBeat` | Measures the time taken to process a heartbeat request. | ms | timer |
-| `consul.raft.rpc.requestVote` | Measures the time taken to process the request vote RPC call. | ms | timer |
-| `consul.raft.snapshot.create` | Measures the time taken to initialize the snapshot process. | ms | timer |
-| `consul.raft.snapshot.persist` | Measures the time taken to dump the current snapshot taken by the Consul agent to the disk. | ms | timer |
-| `consul.raft.snapshot.takeSnapshot` | Measures the total time involved in taking the current snapshot (creating one and persisting it) by the Consul agent. | ms | timer |
-| `consul.serf.snapshot.appendLine` | Measures the time taken by the Consul agent to append an entry into the existing log. | ms | timer |
-| `consul.serf.snapshot.compact` | Measures the time taken by the Consul agent to compact a log. This operation occurs only when the snapshot becomes large enough to justify the compaction. | ms | timer |
-| `consul.raft.state.candidate` | Increments whenever a Consul server starts an election. If this increments without a leadership change occurring it could indicate that a single server is overloaded or is experiencing network connectivity issues. | election attempts / interval | counter |
-| `consul.raft.state.leader` | Increments whenever a Consul server becomes a leader. If there are frequent leadership changes this may be indication that the servers are overloaded and aren't meeting the soft real-time requirements for Raft, or that there are networking problems between the servers. | leadership transitions / interval | counter |
-| `consul.raft.state.follower` | Counts the number of times an agent has entered the follower mode. This happens when a new agent joins the cluster or after the end of a leader election. | follower state entered / interval | counter |
-| `consul.raft.transition.heartbeat_timeout` | The number of times an agent has transitioned to the Candidate state after receiving no heartbeat messages from the last known leader. | timeouts / interval | counter |
-| `consul.raft.verify_leader` | This metric doesn't have a direct correlation to the leader change. It just counts the number of times an agent checks if it is still the leader or not. For example, during every consistent read, the check is done. Depending on the load in the system, this metric count can be high as it is incremented each time a consistent read is completed. | checks / interval | Counter |
-| `consul.raft.wal.head_truncations` | Counts how many log entries have been truncated from the head - i.e. the oldest entries. by graphing the rate of change over time you can see individual truncate calls as spikes. | logs entries truncated | counter |
-| `consul.raft.wal.last_segment_age_seconds` | A gauge that is set each time we rotate a segment and describes the number of seconds between when that segment file was first created and when it was sealed. this gives a rough estimate how quickly writes are filling the disk. | seconds | gauge |
-| `consul.raft.wal.log_appends` | Counts the number of calls to StoreLog(s) i.e. number of batches of entries appended. | calls | counter |
-| `consul.raft.wal.log_entries_read` | Counts the number of log entries read. | log entries read | counter |
-| `consul.raft.wal.log_entries_written` | Counts the number of log entries written. | log entries written | counter |
-| `consul.raft.wal.log_entry_bytes_read` | Counts the bytes of log entry read from segments before decoding. actual bytes read from disk might be higher as it includes headers and index entries and possible secondary reads for large entries that don't fit in buffers. | bytes | counter |
-| `consul.raft.wal.log_entry_bytes_written` | Counts the bytes of log entry after encoding with Codec. Actual bytes written to disk might be slightly higher as it includes headers and index entries. | bytes | counter |
-| `consul.raft.wal.segment_rotations` | Counts how many times we move to a new segment file. | rotations | counter |
-| `consul.raft.wal.stable_gets` | Counts how many calls to StableStore.Get or GetUint64. | calls | counter |
-| `consul.raft.wal.stable_sets` | Counts how many calls to StableStore.Set or SetUint64. | calls | counter |
-| `consul.raft.wal.tail_truncations` | Counts how many log entries have been truncated from the tail - i.e. the newest entries. By graphing the rate of change over time you can see individual truncate calls as spikes. | logs entries truncated | counter |
-| `consul.rpc.accept_conn` | Increments when a server accepts an RPC connection. | connections | counter |
-| `consul.rpc.rate_limit.exceeded` | Increments whenever an RPC is over a configured rate limit. In permissive mode, the RPC is still allowed to proceed. | RPCs | counter |
-| `consul.rpc.rate_limit.log_dropped` | Increments whenever a log that is emitted because an RPC exceeded a rate limit gets dropped because the output buffer is full. | log messages dropped | counter |
-| `consul.catalog.register` | Measures the time it takes to complete a catalog register operation. | ms | timer |
-| `consul.catalog.deregister` | Measures the time it takes to complete a catalog deregister operation. | ms | timer |
-| `consul.server.isLeader` | Tracks if a server is a leader (1) or not (0). | 1 or 0 | gauge |
-| `consul.fsm.register` | Measures the time it takes to apply a catalog register operation to the FSM. | ms | timer |
-| `consul.fsm.deregister` | Measures the time it takes to apply a catalog deregister operation to the FSM. | ms | timer |
-| `consul.fsm.session` | Measures the time it takes to apply the given session operation to the FSM. | ms | timer |
-| `consul.fsm.kvs` | Measures the time it takes to apply the given KV operation to the FSM. | ms | timer |
-| `consul.fsm.tombstone` | Measures the time it takes to apply the given tombstone operation to the FSM. | ms | timer |
-| `consul.fsm.coordinate.batch-update` | Measures the time it takes to apply the given batch coordinate update to the FSM. | ms | timer |
-| `consul.fsm.prepared-query` | Measures the time it takes to apply the given prepared query update operation to the FSM. | ms | timer |
-| `consul.fsm.txn` | Measures the time it takes to apply the given transaction update to the FSM. | ms | timer |
-| `consul.fsm.autopilot` | Measures the time it takes to apply the given autopilot update to the FSM. | ms | timer |
-| `consul.fsm.persist` | Measures the time it takes to persist the FSM to a raft snapshot. | ms | timer |
-| `consul.fsm.intention` | Measures the time it takes to apply an intention operation to the state store. | ms | timer |
-| `consul.fsm.ca` | Measures the time it takes to apply CA configuration operations to the FSM. | ms | timer |
-| `consul.fsm.ca.leaf` | Measures the time it takes to apply an operation while signing a leaf certificate. | ms | timer |
-| `consul.fsm.acl.token` | Measures the time it takes to apply an ACL token operation to the FSM. | ms | timer |
-| `consul.fsm.acl.policy` | Measures the time it takes to apply an ACL policy operation to the FSM. | ms | timer |
-| `consul.fsm.acl.bindingrule` | Measures the time it takes to apply an ACL binding rule operation to the FSM. | ms | timer |
-| `consul.fsm.acl.authmethod` | Measures the time it takes to apply an ACL authmethod operation to the FSM. | ms | timer |
-| `consul.fsm.system_metadata` | Measures the time it takes to apply a system metadata operation to the FSM. | ms | timer |
-| `consul.kvs.apply` | Measures the time it takes to complete an update to the KV store. | ms | timer |
-| `consul.leader.barrier` | Measures the time spent waiting for the raft barrier upon gaining leadership. | ms | timer |
-| `consul.leader.reconcile` | Measures the time spent updating the raft store from the serf member information. | ms | timer |
-| `consul.leader.reconcileMember` | Measures the time spent updating the raft store for a single serf member's information. | ms | timer |
-| `consul.leader.reapTombstones` | Measures the time spent clearing tombstones. | ms | timer |
-| `consul.leader.replication.acl-policies.status` | This will only be emitted by the leader in a secondary datacenter. The value will be a 1 if the last round of ACL policy replication was successful or 0 if there was an error. | healthy | gauge |
-| `consul.leader.replication.acl-policies.index` | This will only be emitted by the leader in a secondary datacenter. Increments to the index of ACL policies in the primary datacenter that have been successfully replicated. | index | gauge |
-| `consul.leader.replication.acl-roles.status` | This will only be emitted by the leader in a secondary datacenter. The value will be a 1 if the last round of ACL role replication was successful or 0 if there was an error. | healthy | gauge |
-| `consul.leader.replication.acl-roles.index` | This will only be emitted by the leader in a secondary datacenter. Increments to the index of ACL roles in the primary datacenter that have been successfully replicated. | index | gauge |
-| `consul.leader.replication.acl-tokens.status` | This will only be emitted by the leader in a secondary datacenter. The value will be a 1 if the last round of ACL token replication was successful or 0 if there was an error. | healthy | gauge |
-| `consul.leader.replication.acl-tokens.index` | This will only be emitted by the leader in a secondary datacenter. Increments to the index of ACL tokens in the primary datacenter that have been successfully replicated. | index | gauge |
-| `consul.leader.replication.config-entries.status` | This will only be emitted by the leader in a secondary datacenter. The value will be a 1 if the last round of config entry replication was successful or 0 if there was an error. | healthy | gauge |
-| `consul.leader.replication.config-entries.index` | This will only be emitted by the leader in a secondary datacenter. Increments to the index of config entries in the primary datacenter that have been successfully replicated. | index | gauge |
-| `consul.leader.replication.federation-state.status` | This will only be emitted by the leader in a secondary datacenter. The value will be a 1 if the last round of federation state replication was successful or 0 if there was an error. | healthy | gauge |
-| `consul.leader.replication.federation-state.index` | This will only be emitted by the leader in a secondary datacenter. Increments to the index of federation states in the primary datacenter that have been successfully replicated. | index | gauge |
-| `consul.leader.replication.namespaces.status` | This will only be emitted by the leader in a secondary datacenter. The value will be a 1 if the last round of namespace replication was successful or 0 if there was an error. | healthy | gauge |
-| `consul.leader.replication.namespaces.index` | This will only be emitted by the leader in a secondary datacenter. Increments to the index of namespaces in the primary datacenter that have been successfully replicated. | index | gauge |
-| `consul.prepared-query.apply` | Measures the time it takes to apply a prepared query update. | ms | timer |
-| `consul.prepared-query.execute_remote` | Measures the time it takes to process a prepared query execute request that was forwarded to another datacenter. | ms | timer |
-| `consul.prepared-query.execute` | Measures the time it takes to process a prepared query execute request. | ms | timer |
-| `consul.prepared-query.explain` | Measures the time it takes to process a prepared query explain request. | ms | timer |
-| `consul.rpc.raft_handoff` | Increments when a server accepts a Raft-related RPC connection. | connections | counter |
-| `consul.rpc.request` | Increments when a server receives a Consul-related RPC request. | requests | counter |
-| `consul.rpc.request_error` | Increments when a server returns an error from an RPC request. | errors | counter |
-| `consul.rpc.query` | Increments when a server receives a read RPC request, indicating the rate of new read queries. See consul.rpc.queries_blocking for the current number of in-flight blocking RPC calls. This metric changed in 1.7.0 to only increment on the start of a query. The rate of queries will appear lower, but is more accurate. | queries | counter |
-| `consul.rpc.queries_blocking` | The current number of in-flight blocking queries the server is handling. | queries | gauge |
-| `consul.rpc.cross-dc` | Increments when a server sends a (potentially blocking) cross datacenter RPC query. | queries | counter |
-| `consul.rpc.consistentRead` | Measures the time spent confirming that a consistent read can be performed. | ms | timer |
-| `consul.session.apply` | Measures the time spent applying a session update. | ms | timer |
-| `consul.session.renew` | Measures the time spent renewing a session. | ms | timer |
-| `consul.session_ttl.invalidate` | Measures the time spent invalidating an expired session. | ms | timer |
-| `consul.txn.apply` | Measures the time spent applying a transaction operation. | ms | timer |
-| `consul.txn.read` | Measures the time spent returning a read transaction. | ms | timer |
-| `consul.grpc.client.request.count` | Counts the number of gRPC requests made by the client agent to a Consul server. Includes a `server_type` label indicating either the `internal` or `external` gRPC server. | requests | counter |
-| `consul.grpc.client.connection.count` | Counts the number of new gRPC connections opened by the client agent to a Consul server. Includes a `server_type` label indicating either the `internal` or `external` gRPC server. | connections | counter |
-| `consul.grpc.client.connections` | Measures the number of active gRPC connections open from the client agent to any Consul servers. Includes a `server_type` label indicating either the `internal` or `external` gRPC server. | connections | gauge |
-| `consul.grpc.server.request.count` | Counts the number of gRPC requests received by the server. Includes a `server_type` label indicating either the `internal` or `external` gRPC server. | requests | counter |
-| `consul.grpc.server.connection.count` | Counts the number of new gRPC connections received by the server. Includes a `server_type` label indicating either the `internal` or `external` gRPC server. | connections | counter |
-| `consul.grpc.server.connections` | Measures the number of active gRPC connections open on the server. Includes a `server_type` label indicating either the `internal` or `external` gRPC server. | connections | gauge |
-| `consul.grpc.server.stream.count` | Counts the number of new gRPC streams received by the server. Includes a `server_type` label indicating either the `internal` or `external` gRPC server. | streams | counter |
-| `consul.grpc.server.streams` | Measures the number of active gRPC streams handled by the server. Includes a `server_type` label indicating either the `internal` or `external` gRPC server. | streams | gauge |
-| `consul.xds.server.streams` | Measures the number of active xDS streams handled by the server split by protocol version. | streams | gauge |
-| `consul.xds.server.streamsUnauthenticated` | Measures the number of active xDS streams handled by the server that are unauthenticated because ACLs are not enabled or ACL tokens were missing. | streams | gauge |
-| `consul.xds.server.idealStreamsMax` | The maximum number of xDS streams per server, chosen to achieve a roughly even spread of load across servers. | streams | gauge |
-| `consul.xds.server.streamDrained` | Counts the number of xDS streams that are drained when rebalancing the load between servers. | streams | counter |
-| `consul.xds.server.streamStart` | Measures the time taken to first generate xDS resources after an xDS stream is opened. | ms | timer |
-
-
-## Server Workload
-
-**Requirements:**
-* Consul 1.12.0+
-
-The following label-based RPC metrics provide insight about the workload on a Consul server and the source of the workload.
-
-The [`prefix_filter`](/consul/docs/agent/config/config-files#telemetry-prefix_filter) telemetry configuration setting blocks or enables all RPC metric method calls. Specify the RPC metrics you want to allow in the `prefix_filter`:
-
-
-
-```hcl
-telemetry {
- prefix_filter = ["+consul.rpc.server.call"]
-}
-```
-
-```json
-{
- "telemetry": {
- "prefix_filter": [
- "+consul.rpc.server.call"
- ]
- }
-}
-```
-
-
-
-| Metric | Description | Unit | Type |
-| ------------------------------------- | --------------------------------------------------------- | ------ | --------- |
-| `consul.rpc.server.call` | Measures the elapsed time taken to complete an RPC call. | ms | summary |
-
-### Labels
-
-The server workload metrics above come with the following labels:
-
-| Label Name | Description | Possible values |
-| ------------------------------------- | -------------------------------------------------------------------- | --------------------------------------- |
-| `method` | The name of the RPC method. | The value of any RPC request in Consul. |
-| `errored` | Indicates whether the RPC call errored. | `true` or `false`. |
-| `request_type` | Whether it is a `read` or `write` request. | `read`, `write` or `unreported`. |
-| `rpc_type` | The RPC implementation. | `net/rpc` or `internal`. |
-| `leader` | Whether the server was a `leader` or not at the time of the request. | `true`, `false` or `unreported`. |
-
-#### Label Explanations
-
-The `internal` value for the `rpc_type` in the table above refers to leader and cluster management RPC operations that Consul performs.
-Historically, `internal` RPC operation metrics were accounted under the same metric names.
-
-The `unreported` value for the `request_type` in the table above refers to RPC requests within Consul where it is difficult to ascertain whether a request is `read` or `write` type.
-
-The `unreported` value for the `leader` label in the table above refers to RPC requests where Consul cannot determine the leadership status for a server.
-
-#### Read Request Labels
-
-In addition to the labels above, for read requests, the following may be populated:
-
-| Label Name | Description | Possible values |
-| ------------------------------------- | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ |
-| `blocking` | Whether the read request passed in a `MinQueryIndex`. | `true` if a MinQueryIndex was passed, `false` otherwise. |
-| `target_datacenter` | The target datacenter for the read request. | The string value of the target datacenter for the request. |
-| `locality` | Gives an indication of whether the RPC request is local or has been forwarded. | `local` if current server data center is the same as `target_datacenter`, otherwise `forwarded`. |
-
-Here is a Prometheus style example of an RPC metric and its labels:
-
-
-
-```
- ...
- consul_rpc_server_call{errored="false",method="Catalog.ListNodes",request_type="read",rpc_type="net/rpc",quantile="0.5"} 255
- ...
-```
-
-
-
-
-## Cluster Health
-
-These metrics give insight into the health of the cluster as a whole.
-Query for the `consul.memberlist.*` and `consul.serf.*` metrics can be appended
-with certain labels to further distinguish data between different gossip pools.
-The supported label for CE is `network`, while `segment`, `partition`, and `area`
-are additionally allowed for Consul Enterprise.
-
-| Metric | Description | Unit | Type |
-|----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------|---------|
-| `consul.memberlist.degraded.probe` | Counts the number of times the agent has performed failure detection on another agent at a slower probe rate. The agent uses its own health metric as an indicator to perform this action. (A low health score means that the node is healthy, and vice versa.) | probes / interval | counter |
-| `consul.memberlist.degraded.timeout` | Counts the number of times an agent was marked as a dead node, whilst not getting enough confirmations from a randomly selected list of agent nodes in an agent's membership. | occurrence / interval | counter |
-| `consul.memberlist.msg.dead` | Counts the number of times an agent has marked another agent to be a dead node. | messages / interval | counter |
-| `consul.memberlist.health.score` | Describes a node's perception of its own health based on how well it is meeting the soft real-time requirements of the protocol. This metric ranges from 0 to 8, where 0 indicates "totally healthy". This health score is used to scale the time between outgoing probes, and higher scores translate into longer probing intervals. For more details see section IV of the Lifeguard paper: https://arxiv.org/pdf/1707.00788.pdf | score | gauge |
-| `consul.memberlist.msg.suspect` | Increments when an agent suspects another as failed when executing random probes as part of the gossip protocol. These can be an indicator of overloaded agents, network problems, or configuration errors where agents can not connect to each other on the [required ports](/consul/docs/agent/config/config-files#ports). | suspect messages received / interval | counter |
-| `consul.memberlist.tcp.accept` | Counts the number of times an agent has accepted an incoming TCP stream connection. | connections accepted / interval | counter |
-| `consul.memberlist.udp.sent/received` | Measures the total number of bytes sent/received by an agent through the UDP protocol. | bytes sent or bytes received / interval | counter |
-| `consul.memberlist.tcp.connect` | Counts the number of times an agent has initiated a push/pull sync with another agent. | push/pull initiated / interval | counter |
-| `consul.memberlist.tcp.sent` | Measures the total number of bytes sent by an agent through the TCP protocol. | bytes sent / interval | counter |
-| `consul.memberlist.gossip` | Measures the time taken for gossip messages to be broadcasted to a set of randomly selected nodes. | ms | timer |
-| `consul.memberlist.msg_alive` | Counts the number of alive messages, that the agent has processed so far, based on the message information given by the network layer. | messages / Interval | counter |
-| `consul.memberlist.msg_dead` | The number of dead messages that the agent has processed so far, based on the message information given by the network layer. | messages / Interval | counter |
-| `consul.memberlist.msg_suspect` | The number of suspect messages that the agent has processed so far, based on the message information given by the network layer. | messages / Interval | counter |
-| `consul.memberlist.node.instances` | Tracks the number of instances in each of the node states: alive, dead, suspect, and left. | nodes | gauge |
-| `consul.memberlist.probeNode` | Measures the time taken to perform a single round of failure detection on a select agent. | nodes / Interval | counter |
-| `consul.memberlist.pushPullNode` | Measures the number of agents that have exchanged state with this agent. | nodes / Interval | counter |
-| `consul.memberlist.queue.broadcasts` | Measures the number of messages waiting to be broadcast to other gossip participants. | messages | sample |
-| `consul.memberlist.size.local` | Measures the size in bytes of the memberlist before it is sent to another gossip recipient. | bytes | gauge |
-| `consul.memberlist.size.remote` | Measures the size in bytes of incoming memberlists from other gossip participants. | bytes | gauge |
-| `consul.serf.member.failed` | Increments when an agent is marked dead. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/consul/docs/agent/config/config-files#ports). | failures / interval | counter |
-| `consul.serf.member.flap` | Available in Consul 0.7 and later, this increments when an agent is marked dead and then recovers within a short time period. This can be an indicator of overloaded agents, network problems, or configuration errors where agents cannot connect to each other on the [required ports](/consul/docs/agent/config/config-files#ports). | flaps / interval | counter |
-| `consul.serf.member.join` | Increments when an agent joins the cluster. If an agent flapped or failed this counter also increments when it re-joins. | joins / interval | counter |
-| `consul.serf.member.left` | Increments when an agent leaves the cluster. | leaves / interval | counter |
-| `consul.serf.events` | Increments when an agent processes an [event](/consul/commands/event). Consul uses events internally so there may be additional events showing in telemetry. There are also per-event counters emitted as `consul.serf.events.<event name>`. | events / interval | counter |
-| `consul.serf.events.<event name>` | Breakdown of `consul.serf.events` by type of event. | events / interval | counter |
-| `consul.serf.msgs.sent` | This metric is a sample of the number of bytes of messages broadcast to the cluster. In a given time interval, the sum of this metric is the total number of bytes sent and the count is the number of messages sent. | message bytes / interval | counter |
-| `consul.autopilot.failure_tolerance` | Tracks the number of voting servers that the cluster can lose while continuing to function. | servers | gauge |
-| `consul.autopilot.healthy` | Tracks the overall health of the local server cluster. If all servers are considered healthy by Autopilot, this will be set to 1. If any are unhealthy, this will be 0. | boolean | gauge |
-| `consul.session_ttl.active` | Tracks the active number of sessions being tracked. | sessions | gauge |
-| `consul.catalog.service.query` | Increments for each catalog query for the given service. | queries | counter |
-| `consul.catalog.service.query-tag` | Increments for each catalog query for the given service with the given tag. | queries | counter |
-| `consul.catalog.service.query-tags` | Increments for each catalog query for the given service with the given tags. | queries | counter |
-| `consul.catalog.service.not-found` | Increments for each catalog query where the given service could not be found. | queries | counter |
-| `consul.catalog.connect.query` | Increments for each mesh-based catalog query for the given service. | queries | counter |
-| `consul.catalog.connect.query-tag` | Increments for each mesh-based catalog query for the given service with the given tag. | queries | counter |
-| `consul.catalog.connect.query-tags` | Increments for each mesh-based catalog query for the given service with the given tags. | queries | counter |
-| `consul.catalog.connect.not-found` | Increments for each mesh-based catalog query where the given service could not be found. | queries | counter |
-
-## Service Mesh Built-in Proxy Metrics
-
-Consul service mesh's built-in proxy is by default configured to log metrics to the
-same sink as the agent that starts it.
-
-When running in this mode it emits some basic metrics. These will be expanded
-upon in the future.
-
-All metrics are prefixed with `consul.proxy.<service name>.` to distinguish
-between multiple proxies on a given host. The table below uses `web` as an
-example service name for brevity.
-
-### Labels
-
-Most labels have a `dst` label and some have a `src` label. When using metrics
-sinks and timeseries stores that support labels or tags, these allow aggregating
-the connections by service name.
-
-Assuming all services are using a managed built-in proxy, you can get a complete
-overview of both number of open connections and bytes sent and received between
-all services by aggregating over these metrics.
-
-For example aggregating over all `upstream` (i.e. outbound) connections which
-have both `src` and `dst` labels, you can get a sum of all the bandwidth in and
-out of a given service or the total number of connections between two services.
-
-### Metrics Reference
-
-The standard go runtime metrics are exported by `go-metrics` as with Consul
-agent. The table below describes the additional metrics exported by the proxy.
-
-| Metric | Description | Unit | Type |
-| ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ------- |
-| `consul.proxy.web.runtime.*` | The same go runtime metrics as documented for the agent above. | mixed | mixed |
-| `consul.proxy.web.inbound.conns` | Shows the current number of connections open from inbound requests to the proxy. Where supported a `dst` label is added indicating the service name the proxy represents. | connections | gauge |
-| `consul.proxy.web.inbound.rx_bytes` | Increments by the number of bytes received from an inbound client connection. Where supported a `dst` label is added indicating the service name the proxy represents. | bytes | counter |
-| `consul.proxy.web.inbound.tx_bytes` | Increments by the number of bytes transferred to an inbound client connection. Where supported a `dst` label is added indicating the service name the proxy represents. | bytes | counter |
-| `consul.proxy.web.upstream.conns` | Shows the current number of connections open from a proxy instance to an upstream. Where supported a `src` label is added indicating the service name the proxy represents, and a `dst` label is added indicating the service name the upstream is connecting to. | connections | gauge |
-| `consul.proxy.web.upstream.rx_bytes` | Increments by the number of bytes received from an upstream connection. Where supported a `src` label is added indicating the service name the proxy represents, and a `dst` label is added indicating the service name the upstream is connecting to. | bytes | counter |
-| `consul.proxy.web.upstream.tx_bytes` | Increments by the number of bytes transferred to an upstream connection. Where supported a `src` label is added indicating the service name the proxy represents, and a `dst` label is added indicating the service name the upstream is connecting to. | bytes | counter |
-
-## Peering metrics
-
-**Requirements:**
-- Consul 1.13.0+
-
-[Cluster peering](/consul/docs/connect/cluster-peering) refers to Consul clusters that communicate through a peer connection, as opposed to a federated connection. Consul collects metrics that describe the number of services exported to a peered cluster. Peering metrics are only emitted by the leader server. These metrics are emitted every 9 seconds.
-
-| Metric | Description | Unit | Type |
-| ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | ------- |
-| `consul.peering.exported_services` | Counts the number of services exported with [exported service configuration entries](/consul/docs/connect/config-entries/exported-services) to a peer cluster. | count | gauge |
-| `consul.peering.healthy` | Tracks the health of a peering connection as reported by the server. If Consul detects errors while sending or receiving from a peer which do not recover within a reasonable time, this metric returns 0. Healthy connections return 1. | health | gauge |
-
-### Labels
-
-Consul attaches the following labels to metric values.
-
-| Label Name | Description | Possible values |
-| ------------------------------------- | -------------------------------------------------------------------------------- | ----------------------------------------- |
-| `peer_name` | The name of the peering on the reporting cluster or leader. | Any defined peer name in the cluster |
-| `peer_id` | The ID of a peer connected to the reporting cluster or leader. | Any UUID |
-| `partition` | Name of the partition that the peering is created in. | Any defined partition name in the cluster |
-
-## Server Host Metrics
-
-Consul servers can report the following metrics about the host's system resources.
-This feature must be enabled in the [agent telemetry configuration](/consul/docs/agent/config/config-files#telemetry-enable_host_metrics).
-Note that if the Consul server is operating inside a container these metrics
-still report host resource usage and do not report any resource limits placed
-on the container.
-
-**Requirements:**
-- Consul 1.15.3+
-
-| Metric | Description | Unit | Type |
-| ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ------- |
-| `consul.host.memory.total` | The total physical memory in bytes | mixed | mixed |
-| `consul.host.memory.available` | The available physical memory in bytes | mixed | mixed |
-| `consul.host.memory.free` | The free physical memory in bytes | mixed | mixed |
-| `consul.host.memory.used` | The used physical memory in bytes | mixed | mixed |
-| `consul.host.memory.used_percent` | The used physical memory as a percentage of total physical memory | mixed | mixed |
-| `consul.host.cpu.total` | The host's total cpu utilization | mixed | mixed |
-| `consul.host.cpu.user` | The cpu utilization in user space | mixed | mixed |
-| `consul.host.cpu.idle` | The cpu utilization in idle state | mixed | mixed |
-| `consul.host.cpu.iowait` | The cpu utilization in iowait state | mixed | mixed |
-| `consul.host.cpu.system` | The cpu utilization in system space | mixed | mixed |
-| `consul.host.disk.size` | The size in bytes of the data_dir disk | mixed | mixed |
-| `consul.host.disk.used` | The number of bytes used on the data_dir disk | mixed | mixed |
-| `consul.host.disk.available` | The number of bytes available on the data_dir disk | mixed | mixed |
-| `consul.host.disk.used_percent` | The percentage of disk space used on the data_dir disk | mixed | mixed |
-| `consul.host.disk.inodes_percent` | The percentage of inode usage on the data_dir disk | mixed | mixed |
-| `consul.host.uptime` | The uptime of the host in seconds | mixed | mixed |
diff --git a/website/content/docs/agent/wal-logstore/enable.mdx b/website/content/docs/agent/wal-logstore/enable.mdx
deleted file mode 100644
index e80315928f2a..000000000000
--- a/website/content/docs/agent/wal-logstore/enable.mdx
+++ /dev/null
@@ -1,151 +0,0 @@
----
-layout: docs
-page_title: Enable the experimental WAL LogStore backend
-description: >-
- Learn how to safely configure and test the experimental WAL backend in your Consul deployment.
----
-
-# Enable the experimental WAL LogStore backend
-
-This topic describes how to safely configure and test the WAL backend in your Consul deployment.
-
-The overall process for enabling the WAL LogStore backend for one server consists of the following steps. In production environments, we recommend starting by enabling the backend on a single server. If you eventually choose to expand the test to further servers, you must repeat these steps for each one.
-
-1. Enable log verification.
-1. Select target server to enable WAL.
-1. Stop target server gracefully.
-1. Remove data directory from target server.
-1. Update target server's configuration.
-1. Start the target server.
-1. Monitor target server raft metrics and logs.
-
-!> **Experimental feature:** The WAL LogStore backend is experimental and may contain bugs that could cause data loss. Follow this guide to manage risk during testing.
-
-## Requirements
-
-- Consul v1.15 or later is required for all servers in the datacenter. Refer to the [standard upgrade procedure](/consul/docs/upgrading/instructions/general-process) and the [1.15 upgrade notes](/consul/docs/upgrading/upgrade-specific#consul-1-15-x) for additional information.
-- A Consul cluster with at least three nodes is required to safely test the WAL backend without downtime.
-
-We recommend taking the following additional measures:
-
-- Take a snapshot prior to testing.
-- Monitor Consul server metrics and logs, and set an alert on specific log events that occur when WAL is enabled. Refer to [Monitor Raft metrics and logs for WAL](/consul/docs/agent/wal-logstore/monitoring) for more information.
-- Enable WAL in a pre-production environment and run it for several days before enabling it in production.
-
-## Known issues
-
-The following issues exist in Consul 1.15.0 and 1.15.1.
-
- * A follower that is disconnected may be unable to catch up if it is using the WAL backend.
- * Restoring user snapshots can break replication to WAL-enabled followers.
- * Restoring user snapshots can cause a WAL-enabled leader to panic.
-
-## Risks
-
-While their likelihood remains low to very low, be aware of the following risks before implementing the WAL backend:
-
- - If WAL corrupts data on a Consul server agent, the server data cannot be recovered. Restart the server with an empty data directory and reload its state from the leader to resolve the issue.
- - WAL may corrupt data or contain a defect that causes the server to panic and crash. WAL may not restart if the defect recurs when WAL reads from the logs on startup. Restart the server with an empty data directory and reload its state from the leader to resolve the issue.
- - If WAL corrupts data, clients may read corrupted data from the Consul server, such as invalid IP addresses or unmatched tokens. This outcome is unlikely even if a recurring defect causes WAL to corrupt data because replication uses objects cached in memory instead of reads from disk. Restart the server with an empty data directory and reload its state from the leader to resolve the issue.
- - If you enable WAL on a Consul CE server, or on a voting server with Consul Enterprise, and WAL corrupts the server's state, that server may become the leader and replicate the corrupted state to all other servers. In this case, restoring from backup is required to recover a completely uncorrupted state. Test WAL on a non-voting server in Enterprise to prevent this outcome. You can add a new non-voting server to the cluster to test with if there are no existing ones.
-
-## Enable log verification
-
-You must enable log verification on all voting servers in Enterprise and all servers in CE because the leader writes verification checkpoints.
-
-1. On each voting server, add the following to the server's configuration file:
-
- ```hcl
- raft_logstore {
- verification {
- enabled = true
- interval = "60s"
- }
- }
- ```
-
-1. Restart the server to apply the changes. The `consul reload` command is not sufficient to apply `raft_logstore` configuration changes.
-1. Run the `consul operator raft list-peers` command to wait for each server to become a healthy voter before moving on to the next. This may take a few minutes for large snapshots.
-
-When complete, the server's logs should contain verifier reports that appear like the following example:
-
-```log hideClipboard
-2023-01-31T14:44:31.174Z [INFO] agent.server.raft.logstore.verifier: verification checksum OK: elapsed=488.463268ms leaderChecksum=f15db83976f2328c rangeEnd=357802 rangeStart=298132 readChecksum=f15db83976f2328c
-```
-
-## Select target server to enable WAL
-
-If you are using Consul CE, or Consul Enterprise without non-voting servers, select a follower server to enable WAL. As noted in [Risks](#risks), Consul Enterprise users with non-voting servers should first select a non-voting server, or consider adding another server as a non-voter to test on.
-
-Retrieve the current state of the servers by running the following command:
-
-```shell-session
-$ consul operator raft list-peers
-```
-
-## Stop target server
-
-Stop the target server gracefully. For example, if you are using `systemd`,
-run the following command:
-
-```shell-session
-$ systemctl stop consul
-```
-
-If your environment uses configuration management automation that might interfere with this process, such as Chef or Puppet, you must disable them until you have completely enabled WAL as a storage backend.
-
-## Remove data directory from target server
-
-Temporarily moving the data directory to a different location is less destructive than deleting it. We recommend moving it in cases where you unsuccessfully enable WAL. Do not use the old data directory (`/data-dir/raft.bak`) for recovery after restarting the server. We recommend eventually deleting the old directory.
-
-The following example assumes the `data_dir` in the server's configuration is `/data-dir` and renames the raft directory to `/data-dir/raft.bak`.
-
-```shell-session
-$ mv /data-dir/raft /data-dir/raft.bak
-```
-
-When switching backends, you must always remove _the entire raft directory_, not just the `raft.db` file or `wal` directory. The log must always be consistent with the snapshots to avoid undefined behavior or data loss.
-
-## Update target server configuration
-
-Add the following to the target server's configuration file:
-
-```hcl
-raft_logstore {
- backend = "wal"
- verification {
- enabled = true
- interval = "60s"
- }
-}
-```
-
-## Start target server
-
-Start the target server. For example, if you are using `systemd`, run the following command:
-
-```shell-session
-$ systemctl start consul
-```
-
-Watch for the server to become a healthy voter again.
-
-```shell-session
-$ consul operator raft list-peers
-```
-
-## Monitor target server Raft metrics and logs
-
-Refer to [Monitor Raft metrics and logs for WAL](/consul/docs/agent/wal-logstore/monitoring) for details.
-
-We recommend leaving the cluster in the test configuration for several days or weeks, as long as you observe no errors. An extended test provides more confidence that WAL operates correctly under varied workloads and during routine server restarts. If you observe any errors, end the test immediately and report them.
-
-If you disabled configuration management automation, consider reenabling it during the testing phase to pick up other updates for the host. You must ensure that it does not revert the Consul configuration file and remove the altered backend configuration. One way to do this is add the `raft_logstore` block to a separate file that is not managed by your automation. This file can either be added to the directory if [`-config-dir`](/consul/docs/agent/config/cli-flags#_config_dir) is used or as an additional [`-config-file`](/consul/docs/agent/config/cli-flags#_config_file) argument.
-
-## Next steps
-
-- If you observe any verification errors, performance anomalies, or other suspicious behavior from the target server during the test, you should immediately follow [the procedure to revert back to BoltDB](/consul/docs/agent/wal-logstore/revert-to-boltdb). Report failures through GitHub.
-
-- If you do not see errors and would like to expand the test further, you can repeat the above procedure on another target server. We suggest waiting after each test expansion and slowly rolling WAL out to other parts of your environment. Once the majority of your servers use WAL, any bugs not yet discovered may result in cluster unavailability.
-
-- If you wish to permanently enable WAL on all servers, repeat the steps described in this topic for each server. Even if `backend = "wal"` is set in the configuration, servers continue to use BoltDB if they find an existing raft.db file in the data directory.
diff --git a/website/content/docs/agent/wal-logstore/index.mdx b/website/content/docs/agent/wal-logstore/index.mdx
deleted file mode 100644
index b215db158c8b..000000000000
--- a/website/content/docs/agent/wal-logstore/index.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
----
-layout: docs
-page_title: WAL LogStore Backend Overview
-description: >-
- The experimental WAL (write-ahead log) LogStore backend shipped in Consul 1.15 is intended to replace the BoltDB backend, improving performance and resolving log storage issues.
----
-
-# Experimental WAL LogStore backend overview
-
-This topic provides an overview of the WAL (write-ahead log) LogStore backend.
-The WAL backend is an experimental feature. Refer to
-[Requirements](/consul/docs/agent/wal-logstore/enable#requirements) for
-supported environments and known issues.
-
-We do not recommend enabling the WAL backend in production without following
-[our guide for safe
-testing](/consul/docs/agent/wal-logstore/enable).
-
-## WAL versus BoltDB
-
-WAL implements a traditional log with rotating, append-only log files. WAL resolves many issues with the existing `LogStore` provided by the BoltDB backend. The BoltDB `LogStore` is a copy-on-write BTree, which is not optimized for append-only, write-heavy workloads.
-
-### BoltDB storage scalability issues
-
-The existing BoltDB log store inefficiently stores append-only logs to disk because it was designed as a full key-value database. It is a single file that only ever grows. Deleting the oldest logs, which Consul does regularly when it makes new snapshots of the state, leaves free space in the file. The free space must be tracked in a `freelist` so that BoltDB can reuse it on future writes. By contrast, a simple segmented log can delete the oldest log files from disk.
-
-A burst of writes at double or triple the normal volume can suddenly cause the log file to grow to several times its steady-state size. After Consul takes the next snapshot and truncates the oldest logs, the resulting file is mostly empty space.
-
-To track the free space, Consul must write extra metadata to disk with every write. The metadata is proportional to the amount of free pages, so after a large burst write latencies tend to increase. In some cases, the latencies cause serious performance degradation to the cluster.
-
-To mitigate risks associated with sudden bursts of log data, Consul tries to limit lots of logs from accumulating in the LogStore. Significantly larger BoltDB files are slower to append to because the tree is deeper and freelist larger. For this reason, Consul's default options associated with snapshots, truncating logs, and keeping the log history have been aggressively set toward keeping BoltDB small rather than using disk IO optimally.
-
-But the larger the file, the more likely it is to have a large freelist or suddenly form one after a burst of writes. For this reason, many of Consul's default options associated with snapshots, truncating logs, and keeping the log history aggressively keep BoltDB small rather than using disk IO more efficiently.
-
-Other reliability issues, such as [raft replication capacity issues](/consul/docs/agent/monitor/telemetry#raft-replication-capacity-issues), are much simpler to solve without the performance concerns caused by storing more logs in BoltDB.
-
-### WAL approaches storage issues differently
-
-When directly measured, WAL is more performant than BoltDB because it solves a simpler storage problem. Despite this, some users may not notice a significant performance improvement from the upgrade with the same configuration and workload. In this case, the benefit of WAL is that retaining more logs does not affect write performance. As a result, strategies for reducing disk IO with slower snapshots or for keeping logs to permit slower followers to catch up with cluster state are all possible, increasing the reliability of the deployment.
-
-## WAL quality assurance
-
-The WAL backend has been tested thoroughly during development:
-
-- Every component in the WAL, such as [metadata management](https://github.com/hashicorp/raft-wal/blob/main/types/meta.go), [log file encoding](https://github.com/hashicorp/raft-wal/blob/main/types/segment.go) to actual [file-system interaction](https://github.com/hashicorp/raft-wal/blob/main/types/vfs.go) are abstracted so unit tests can simulate difficult-to-reproduce disk failures.
-
-- We used the [application-level intelligent crash explorer (ALICE)](https://github.com/hashicorp/raft-wal/blob/main/alice/README.md) to exhaustively simulate thousands of possible crash failure scenarios. WAL correctly recovered from all scenarios.
-
-- We ran hundreds of tests in a performance testing cluster with checksum verification enabled and did not detect data loss or corruption. We will continue testing before making WAL the default backend.
-
-We are aware of how complex and critical disk-persistence is for your data.
-
-We hope that many users at different scales will try WAL in their environments after upgrading to 1.15 or later and report success or failure so that we can confidently replace BoltDB as the default for new clusters in a future release.
diff --git a/website/content/docs/agent/wal-logstore/monitoring.mdx b/website/content/docs/agent/wal-logstore/monitoring.mdx
deleted file mode 100644
index f4f81a986d27..000000000000
--- a/website/content/docs/agent/wal-logstore/monitoring.mdx
+++ /dev/null
@@ -1,85 +0,0 @@
----
-layout: docs
-page_title: Monitor Raft metrics and logs for WAL
-description: >-
- Learn how to monitor Raft metrics emitted by the experimental WAL (write-ahead log) LogStore backend shipped in Consul 1.15.
----
-
-# Monitor Raft metrics and logs for WAL
-
-This topic describes how to monitor Raft metrics and logs if you are testing the WAL backend. We strongly recommend monitoring the Consul cluster, especially the target server, for evidence that the WAL backend is not functioning correctly. Refer to [Enable the experimental WAL LogStore backend](/consul/docs/agent/wal-logstore/enable) for additional information about the WAL backend.
-
-!> **Upgrade warning:** The WAL LogStore backend is experimental.
-
-## Monitor for checksum failures
-
-Log store verification failures on any server, regardless of whether you are running the BoltDB or WAL backend, are unrecoverable errors. Consul may report the following errors in logs.
-
-### Read failures: Disk Corruption
-
-```log hideClipboard
-2022-11-15T22:41:23.546Z [ERROR] agent.raft.logstore: verification checksum FAILED: storage corruption rangeStart=1234 rangeEnd=3456 leaderChecksum=0xc1... readChecksum=0x45...
-```
-
-This error indicates that the server read back data that is different from what it wrote to disk, which signals corruption in the storage backend or filesystem.
-
-For convenience, Consul also increments a metric `consul.raft.logstore.verifier.read_checksum_failures` when this occurs.
-
-### Write failures: In-flight Corruption
-
-The following error indicates that the checksum on the follower did not match the leader when the follower received the logs. The error implies that the corruption happened in the network or software and not the log store:
-
-```log hideClipboard
-2022-11-15T22:41:23.546Z [ERROR] agent.raft.logstore: verification checksum FAILED: in-flight corruption rangeStart=1234 rangeEnd=3456 leaderChecksum=0xc1... followerWriteChecksum=0x45...
-```
-
-It is unlikely that this error indicates an issue with the storage backend, but you should take the same steps to resolve and report it.
-
-The `consul.raft.logstore.verifier.write_checksum_failures` metric increments when this error occurs.
-
-## Resolve checksum failures
-
-If either type of corruption is detected, complete the instructions for [reverting to BoltDB](/consul/docs/agent/wal-logstore/revert-to-boltdb). If the server already uses BoltDB, the errors likely indicate a latent bug in BoltDB or a bug in the verification code. In both cases, you should follow the revert instructions.
-
-Report all verification failures as a [GitHub
-issue](https://github.com/hashicorp/consul/issues/new?assignees=&labels=&template=bug_report.md&title=WAL:%20Checksum%20Failure).
-
-In your report, include the following:
- - Details of your server cluster configuration and hardware
- - Logs around the failure message
- - Context for how long you have been running the configuration
- - Any metrics or description of the workload you have. For example, how many raft
- commits per second. Also include the performance metrics described on this page.
-
-We recommend setting up an alert on Consul server logs containing `verification checksum FAILED` or on the `consul.raft.logstore.verifier.{read|write}_checksum_failures` metrics. The sooner you respond to a corrupt server, the lower the chance of any of the [potential risks](/consul/docs/agent/wal-logstore/enable#risks) causing problems in your cluster.
-
-## Performance metrics
-
-The key performance metrics to watch are:
-
-- `consul.raft.commitTime` measures the time to commit new writes on a quorum of
- servers. It should be the same or lower after deploying WAL. Even if WAL is
- faster for your workload and hardware, it may not be reflected in `commitTime`
- until enough followers are using WAL that the leader does not have to wait for
- two slower followers in a cluster of five to catch up.
-
-- `consul.raft.rpc.appendEntries.storeLogs` measures the time spent persisting
- logs to disk on each _follower_. It should be the same or lower for
- WAL-enabled followers.
-
-- `consul.raft.replication.appendEntries.rpc` measures the time taken for each
- `AppendEntries` RPC from the leader's perspective. If this is significantly
- higher than `consul.raft.rpc.appendEntries` on the follower, it indicates a
- known queuing issue in the Raft library and is unrelated to the backend.
- Followers with WAL enabled should not be slower than the others. You can
- determine which follower is associated with which metric by running the
- `consul operator raft list-peers` command and matching the
- `peer_id` label value to the server IDs listed.
-
-- `consul.raft.compactLogs` measures the time taken to truncate the logs after a
- snapshot. WAL-enabled servers should not be slower than BoltDB servers.
-
-- `consul.raft.leader.dispatchLog` measures the time spent persisting logs to
- disk on the _leader_. It is only relevant if a WAL-enabled server becomes a
- leader. It should be the same or lower than before when the leader was using
- BoltDB.
\ No newline at end of file
diff --git a/website/content/docs/agent/wal-logstore/revert-to-boltdb.mdx b/website/content/docs/agent/wal-logstore/revert-to-boltdb.mdx
deleted file mode 100644
index 9ba6923b42db..000000000000
--- a/website/content/docs/agent/wal-logstore/revert-to-boltdb.mdx
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: docs
-page_title: Revert to BoltDB
-description: >-
- Learn how to revert Consul to the BoltDB backend after enabling the WAL (write-ahead log) LogStore backend shipped in Consul 1.15.
----
-
-# Revert storage backend to BoltDB from WAL
-
-This topic describes how to revert your Consul storage backend from the experimental WAL LogStore backend to the default BoltDB.
-
-The overall process for reverting to BoltDB consists of the following steps. Repeat the steps for all Consul servers that you need to revert.
-
-1. Stop target server gracefully.
-1. Remove data directory from target server.
-1. Update target server's configuration.
-1. Start target server.
-
-## Stop target server gracefully
-
-Stop the target server gracefully. For example, if you are using `systemd`,
-run the following command:
-
-```shell-session
-$ systemctl stop consul
-```
-
-If your environment uses configuration management automation that might interfere with this process, such as Chef or Puppet, you must disable them until you have completely reverted the storage backend.
-
-## Remove data directory from target server
-
-Temporarily moving the data directory to a different location is less destructive than deleting it. We recommend moving the data directory instead of deleting it in cases where you unsuccessfully enable WAL. Do not use the old data directory (`/data-dir/raft.wal.bak`) for recovery after restarting the server. We recommend eventually deleting the old directory.
-
-The following example assumes the `data_dir` in the server's configuration is `/data-dir` and renames the raft directory to `/data-dir/raft.wal.bak`.
-
-```shell-session
-$ mv /data-dir/raft /data-dir/raft.wal.bak
-```
-
-When switching backends, you must always remove _the entire raft directory_, not just the `raft.db` file or `wal` directory. This is because the log must always be consistent with the snapshots to avoid undefined behavior or data loss.
-
-## Update target server's configuration
-
-Modify the `backend` in the target server's configuration file:
-
-```hcl
-raft_logstore {
- backend = "boltdb"
- verification {
- enabled = true
- interval = "60s"
- }
-}
-```
-
-## Start target server
-
-Start the target server. For example, if you are using `systemd`, run the following command:
-
-```shell-session
-$ systemctl start consul
-```
-
-Watch for the server to become a healthy voter again.
-
-```shell-session
-$ consul operator raft list-peers
-```
-
-### Clean up old data directories
-
-If necessary, clean up any `raft.wal.bak` directories. Replace `/data-dir` with the value you specified in your configuration file.
-
-```shell-session
-$ rm -r /data-dir/raft.wal.bak
-```
diff --git a/website/content/docs/architecture/anti-entropy.mdx b/website/content/docs/architecture/anti-entropy.mdx
deleted file mode 100644
index 292f5c0070d5..000000000000
--- a/website/content/docs/architecture/anti-entropy.mdx
+++ /dev/null
@@ -1,123 +0,0 @@
----
-layout: docs
-page_title: Anti-Entropy Enforcement
-description: >-
- Anti-entropy keeps distributed systems consistent. Learn how Consul uses an anti-entropy mechanism to periodically sync agent states with the service catalog to prevent the catalog from becoming stale.
----
-
-# Anti-Entropy Enforcement
-
-Consul uses an advanced method of maintaining service and health information.
-This page details how services and checks are registered, how the catalog is
-populated, and how health status information is updated as it changes.
-
-### Components
-
-It is important to first understand the moving pieces involved in services and
-health checks: the [agent](#agent) and the [catalog](#catalog). These are
-described conceptually below to make anti-entropy easier to understand.
-
-#### Agent
-
-Each Consul agent maintains its own set of service and check registrations as
-well as health information. The agents are responsible for executing their own
-health checks and updating their local state.
-
-Services and checks within the context of an agent have a rich set of
-configuration options available. This is because the agent is responsible for
-generating information about its services and their health through the use of
-[health checks](/consul/docs/services/usage/checks).
-
-#### Catalog
-
-Consul's service discovery is backed by a service catalog. This catalog is
-formed by aggregating information submitted by the agents. The catalog maintains
-the high-level view of the cluster, including which services are available,
-which nodes run those services, health information, and more. The catalog is
-used to expose this information via the various interfaces Consul provides,
-including DNS and HTTP.
-
-Services and checks within the context of the catalog have a much more limited
-set of fields when compared with the agent. This is because the catalog is only
-responsible for recording and returning information _about_ services, nodes, and
-health.
-
-The catalog is maintained only by server nodes. This is because the catalog is
-replicated via the [Raft log](/consul/docs/architecture/consensus) to provide a
-consolidated and consistent view of the cluster.
-
-### Anti-Entropy
-
-Entropy is the tendency of systems to become increasingly disordered. Consul's
-anti-entropy mechanisms are designed to counter this tendency, to keep the
-state of the cluster ordered even through failures of its components.
-
-Consul has a clear separation between the global service catalog and the agent's
-local state as discussed above. The anti-entropy mechanism reconciles these two
-views of the world: anti-entropy is a synchronization of the local agent state and
-the catalog. For example, when a user registers a new service or check with the
-agent, the agent in turn notifies the catalog that this new check exists.
-Similarly, when a check is deleted from the agent, it is consequently removed from
-the catalog as well.
-
-Anti-entropy is also used to update availability information. As agents run
-their health checks, their status may change in which case their new status
-is synced to the catalog. Using this information, the catalog can respond
-intelligently to queries about its nodes and services based on their
-availability.
-
-During this synchronization, the catalog is also checked for correctness. If
-any services or checks exist in the catalog that the agent is not aware of, they
-will be automatically removed to make the catalog reflect the proper set of
-services and health information for that agent. Consul treats the state of the
-agent as authoritative; if there are any differences between the agent
-and catalog view, the agent-local view will always be used.
-
-### Periodic Synchronization
-
-In addition to running when changes to the agent occur, anti-entropy is also a
-long-running process which periodically wakes up to sync service and check
-status to the catalog. This ensures that the catalog closely matches the agent's
-true state. This also allows Consul to re-populate the service catalog even in
-the case of complete data loss.
-
-To avoid saturation, the amount of time between periodic anti-entropy runs will
-vary based on cluster size. The table below defines the relationship between
-cluster size and sync interval:
-
-| Cluster Size | Periodic Sync Interval |
-| ------------ | ---------------------- |
-| 1 - 128 | 1 minute |
-| 129 - 256 | 2 minutes |
-| 257 - 512 | 3 minutes |
-| 513 - 1024 | 4 minutes |
-| ... | ... |
-
-The intervals above are approximate. Each Consul agent will choose a randomly
-staggered start time within the interval window to avoid a thundering herd.
-
-### Best-effort sync
-
-Anti-entropy can fail in a number of cases, including misconfiguration of the
-agent or its operating environment, I/O problems (full disk, filesystem
-permission, etc.), networking problems (agent cannot communicate with server),
-among others. Because of this, the agent attempts to sync in best-effort
-fashion.
-
-If an error is encountered during an anti-entropy run, the error is logged and
-the agent continues to run. The anti-entropy mechanism is run periodically to
-automatically recover from these types of transient failures.
-
-### Enable Tag Override
-
-Synchronization of service registration can be partially modified to
-allow external agents to change the tags for a service. This can be
-useful in situations where an external monitoring service needs to be
-the source of truth for tag information. For example, the Redis
-database and its monitoring service Redis Sentinel have this kind of
-relationship. Redis instances are responsible for much of their
-configuration, but Sentinels determine whether the Redis instance is a
-primary or a secondary. Enable the
-[`enable_tag_override`](/consul/docs/services/configuration/services-configuration-reference#enable_tag_override) parameter in your service definition file to tell the Consul agent running the Redis database to bypass
-tags during anti-entropy synchronization. Refer to
-[Modify anti-entropy synchronization](/consul/docs/services/usage/define-services#modify-anti-entropy-synchronization) for additional information.
diff --git a/website/content/docs/architecture/backend.mdx b/website/content/docs/architecture/backend.mdx
new file mode 100644
index 000000000000..d765ece700d1
--- /dev/null
+++ b/website/content/docs/architecture/backend.mdx
@@ -0,0 +1,31 @@
+---
+layout: docs
+page_title: Persistent backend architecture
+description: >-
+ Consul persists the Raft index, which logs cluster activities, with the Write-ahead log (WAL) LogStore backend. Consul saves the Raft index in the server's data directory.
+---
+
+# Persistent data backend architecture
+
+This page introduces the architecture of the backend that Consul server agents use to store Raft index data.
+
+## Raft index
+
+Consul uses the Raft protocol to manage [server consensus](/consul/docs/concept/consensus), to maintain a [reliable, fault-tolerant](/consul/docs/concept/reliability) state across all servers. This consensus mechanism ensures [consistent service discovery and health monitoring](/consul/docs/concept/consistency), even when individual servers fail or become temporarily disconnected from the cluster.
+
+The Raft index provides a record of the cluster's state. It tracks interactions between Consul servers as they conduct elections, register service instances into [the Consul catalog](/consul/docs/concept/catalog), and update the catalog with the results of service node health checks.
+
+You can also use Consul's [snapshot agent](/consul/commands/snapshot/agent) to save a copy of the entire Raft index. This snapshot lets you restore a datacenter from a backup in the event of an outage or catastrophic failure. You can save the snapshot to a cloud storage bucket to ensure data persistence.
+
+## Data directory
+
+Consul writes the Raft index to the data directory specified in the agent configuration with the `data_dir` parameter or `-data-dir` CLI flag. The data directory is a requirement for all agents, and should be durable across reboots.
+
+## Write-ahead log (WAL) LogStore backend
+
+Consul logs the Raft index with the write-ahead log (WAL) LogStore backend. The WAL backend implements a traditional log with rotating, append-only log files, and it retains logs without affecting a cluster's write performance at scale.
+
+Previous versions of Consul used BoltDB as the default LogStore backend. Refer
+to the [WAL LogStore backend overview](/consul/docs/deploy/server/wal) for more
+information. To use BoltDB instead of WAL, refer to [Revert storage backend to
+BoltDB from WAL](/consul/docs/deploy/server/wal/revert-boltdb).
diff --git a/website/content/docs/architecture/capacity-planning.mdx b/website/content/docs/architecture/capacity-planning.mdx
deleted file mode 100644
index 2f80c4cf289a..000000000000
--- a/website/content/docs/architecture/capacity-planning.mdx
+++ /dev/null
@@ -1,188 +0,0 @@
----
-layout: docs
-page_title: Consul capacity planning
-description: >-
- Learn how to maintain your Consul cluster in a healthy state by provisioning the correct resources.
----
-
-# Consul capacity planning
-
-This page describes our capacity planning recommendations when deploying and maintaining a Consul cluster in production. When your organization designs a production environment, you should consider your available resources and their impact on network capacity.
-
-## Introduction
-
-It is important to select the correct size for your server instances. Consul server environments have a standard set of minimum requirements. However, these requirements may vary depending on what you are using Consul for.
-
-Insufficient resource allocations may cause network issues or degraded performance in general. When a slowdown in performance results in a Consul leader node that is unable to respond to requests in sufficient time, the Consul cluster triggers a new leader election. Consul pauses all network requests and Raft updates until the election ends.
-
-## Hardware requirements
-
-The minimum hardware requirements for Consul servers in production clusters as recommended by the [reference architecture](/consul/tutorials/production-deploy/reference-architecture#hardware-sizing-for-consul-servers) are:
-
-| CPU | Memory | Disk Capacity | Disk IO | Disk Throughput | Avg Round-Trip-Time | 99% Round-Trip-Time |
-| --------- | ------------ | ------------- | ----------- | --------------- | ------------------- | ------------------- |
-| 8-16 core | 32-64 GB RAM | 200+ GB | 7500+ IOPS | 250+ MB/s | Lower than 50ms | Lower than 100ms |
-
-For the major cloud providers, we recommend starting with one of the following instances that meet the minimum requirements. Then scale up as needed. We also recommend avoiding "burstable" CPU and storage options where performance may drop after a consistent load.
-
-| Provider | Size | Instance/VM Types | Disk Volume Specs |
-| --------- | ----- | ------------------------------------- | --------------------------------- |
-| **AWS** | Large | `m5.2xlarge`, `m5.4xlarge` | 200+GB `gp3`, 10000 IOPS, 250MB/s |
-| **Azure** | Large | `Standard_D8s_v3`, `Standard_D16s_v3` | 2048GB `Premium SSD`, 7500 IOPS, 200MB/s |
-| **GCP** | Large | `n2-standard-8`, `n2-standard-16` | 1000GB `pd-ssd`, 30000 IOPS, 480MB/s |
-
-
-For HCP Consul Dedicated, cluster size is measured in the number of service instances supported. Find out more information in the [HCP Consul Dedicated pricing page](https://cloud.hashicorp.com/products/consul/pricing).
-
-## Workload input and output requirements
-
-Workloads are any actions that interact with the Consul cluster. These actions consist of key/value reads and writes, service registrations and deregistrations, adding or removing Consul client agents, and more.
-
-Input/output operations per second (IOPS) is a unit of measurement for the amount of reads and writes to non-adjacent storage locations.
-For high workloads, ensure that the Consul server disks support a [high number of IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html#ebs-io-iops) to keep up with the rapid Raft log update rate.
-Unlike bare-metal environments, IOPS for virtual instances in cloud environments is often tied to storage sizing. More storage GBs typically grants you more IOPS. Therefore, we recommend deploying on [IOPS-optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/provisioned-iops.html).
-
-Consul server agents are generally I/O bound for writes and CPU bound for reads. For additional tuning recommendations, refer to [raft tuning](#raft-tuning).
-
-## Memory requirements
-
-You should allocate RAM for server agents so that they contain 2 to 4 times the working set size. You can determine the working set size of a running cluster by noting the value of `consul.runtime.alloc_bytes` in the leader node's telemetry data. Inspect your monitoring solution for the telemetry value, or run the following commands with the [jq](https://stedolan.github.io/jq/download/) tool installed on your Consul leader instance.
-
-
-
-For Kubernetes, execute the command from the leader pod. `jq` is available in the Consul server containers.
-
-
-
-Set `$CONSUL_HTTP_TOKEN` to an ACL token with valid permissions, then retrieve the working set size.
-
-```shell-session
-$ curl --silent --header "X-Consul-Token: $CONSUL_HTTP_TOKEN" http://127.0.0.1:8500/v1/agent/metrics | jq '.Gauges[] | select(.Name=="consul.runtime.alloc_bytes") | .Value'
-616017920
-```
-
-## Kubernetes storage requirements
-
-When you set up persistent volumes (PV) resources, you should define the correct server storage class parameter because the defaults are likely insufficient in performance. To set the [storageClass Helm chart parameter](/consul/docs/k8s/helm#v-server-storageclass), refer to the [Kubernetes documentation on storageClasses](https://kubernetes.io/docs/concepts/storage/storage-classes/) for more information about your specific cloud provider.
-
-## Read and write heavy workload recommendations
-
-In production, your use case may lead to Consul performing read-heavy workloads, write-heavy workloads, or both. Refer to the following table for specific resource recommendations for these types of workloads.
-
-| Workload type | Instance Recommendations | Workload element examples | Enterprise Feature Recommendations |
-| ------------- | ------------------------- | ------------------------ | ------------------------ |
-| Read-heavy | Instances of type `m5.4xlarge (AWS)`, `Standard_D16s_v3 (Azure)`, `n2-standard-16 (GCP)` | Raft RPCs calls, DNS queries, key/value retrieval | [Read replicas](/consul/docs/enterprise/read-scale) |
-| Write-heavy | IOPS performance of `10,000+` | Consul agent joins and leaves, service registration and deregistration, key/value writes | [Network segments](/consul/docs/enterprise/network-segments/network-segments-overview) |
-
-For recommendations on troubleshooting issues with read-heavy or write-heavy workloads, refer to [Consul at Scale](/consul/docs/architecture/scale#resource-usage-and-metrics-recommendations).
-
-## Monitor performance
-
-Monitoring is critical to ensure that your Consul datacenter has sufficient resources to continue operations. A proactive monitoring strategy helps you find problems in your network before they impact your deployments.
-
-We recommend completing the [Monitor Consul server health and performance with metrics and logs](/consul/tutorials/observe-your-network/server-metrics-and-logs) tutorial as a starting point for Consul metrics and telemetry. The following tutorials guide you through specific monitoring solutions for your Consul cluster.
-
-- [Monitor Consul server health and performance with metrics and logs](/consul/tutorials/observe-your-network/server-metrics-and-logs)
-- [Observe Consul service mesh traffic](/consul/tutorials/get-started-kubernetes/kubernetes-gs-observability)
-
-### Important metrics
-
-In production environments, create baselines for your Consul cluster's metrics. After you discover the baselines, you will be able to define alerts and receive notifications when there are unexpected values. For a detailed explanation on the metrics and their values, refer to [Consul Agent telemetry](/consul/docs/agent/telemetry).
-
-### Transaction metrics
-
-These metrics indicate how long it takes to complete write operations in various parts of the Consul cluster.
-
-- [`consul.kvs.apply`](/consul/docs/agent/monitor/telemetry#transaction-timing) measures the time it takes to complete an update to the KV store.
-- [`consul.txn.apply`](/consul/docs/agent/monitor/telemetry#transaction-timing) measures the time spent applying a transaction operation.
-- [`consul.raft.apply`](/consul/docs/agent/monitor/telemetry#transaction-timing) counts the number of Raft transactions applied during the measurement interval. This metric is only reported on the leader.
-- [`consul.raft.commitTime`](/consul/docs/agent/monitor/telemetry#transaction-timing) measures the time it takes to commit a new entry to the Raft log on disk on the leader.
-
-### Memory metrics
-
-These performance indicators can help you diagnose if the current instance sizing is unable to handle the workload.
-
-- [`consul.runtime.alloc_bytes`](/consul/docs/agent/monitor/telemetry#memory-usage) measures the number of bytes allocated by the Consul process.
-- [`consul.runtime.sys_bytes`](/consul/docs/agent/monitor/telemetry#memory-usage) measures the total number of bytes of memory obtained from the OS.
-- [`consul.runtime.heap_objects`](/consul/docs/agent/monitor/telemetry#metrics-reference) measures the number of objects allocated on the heap and is a general memory pressure indicator.
-
-### Leadership metrics
-
-Leadership changes are not a cause for concern but frequent changes may be a symptom of a deeper problem. Frequent elections or leadership changes may indicate network issues between the Consul servers, or the Consul servers are unable to keep up with the load.
-
-- [`consul.raft.leader.lastContact`](/consul/docs/agent/monitor/telemetry#leadership-changes) measures the time since the leader was last able to contact the follower nodes when checking its leader lease.
-- [`consul.raft.state.candidate`](/consul/docs/agent/monitor/telemetry#leadership-changes) increments whenever a Consul server starts an election.
-- [`consul.raft.state.leader`](/consul/docs/agent/monitor/telemetry#leadership-changes) increments whenever a Consul server becomes a leader.
-- [`consul.server.isLeader`](/consul/docs/agent/monitor/telemetry#leadership-changes) tracks whether a server is a leader.
-
-### Network metrics
-
-Network activity and RPC count measurements indicate the current load created from a Consul agent, including when the load becomes high enough to be rate limited. If an unusually high RPC count occurs, you should investigate before it overloads the cluster.
-
-- [`consul.client.rpc`](/consul/docs/agent/monitor/telemetry#network-activity-rpc-count) increments whenever a Consul agent in client mode makes an RPC request to a Consul server.
-- [`consul.client.rpc.exceeded`](/consul/docs/agent/monitor/telemetry#network-activity-rpc-count) increments whenever a Consul agent in client mode makes an RPC request to a Consul server and gets rate limited by that agent's limits configuration.
-- [`consul.client.rpc.failed`](/consul/docs/agent/monitor/telemetry#network-activity-rpc-count) increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails.
-
-## Network constraints and alternate approaches
-
-If it is impossible for you to allocate the required resources, you can make changes to Consul's performance so that it operates with lower speed or resilience. These changes ensure that your cluster remains within its resource capacity.
-
-- Soft limits prevent your cluster from degrading due to overload.
-- Raft tuning lets you compensate for unfavorable environments.
-
-### Soft limits
-
-The recommended maximum size for a single datacenter is 5,000 Consul client agents. This recommendation is based on a standard, non-tuned environment and considers a blast radius's risk management factor. The maximum number of agents may be lower, depending on how you use Consul.
-
-If you require more than 5,000 client agents, you should break up the single Consul datacenter into multiple smaller datacenters.
-
-- When the nodes are spread across separate physical locations such as different regions, you can model multiple datacenter structures based on physical locations.
-- Use [network segments](/consul/docs/enterprise/network-segments/network-segments-overview) in a single available zone or region to lower overall resource usage in a single datacenter.
-
-When deploying [Consul in Kubernetes](/consul/docs/k8s), we recommend you set both _requests_ and _limits_ in the Helm chart. Refer to the [Helm chart documentation](/consul/docs/k8s/helm#v-server-resources) for more information.
-
-- Requests allocate the required resources for your Consul workloads.
-- Limits prevent your pods from being terminated and restarted if they consume more resources than requested and Kubernetes needs to reclaim these resources. Limits can prevent outage situations where the Consul leader's container gets terminated and redeployed due to resource constraints.
-
-The following is an example Helm configuration that allocates 16 CPU cores and 64 gigabytes of memory:
-
-
-
-```yaml
-global:
- image: "hashicorp/consul"
-## ...
-resources:
- requests:
- memory: '64G'
- cpu: '16000m'
- limits:
- memory: '64G'
- cpu: '16000m'
-```
-
-
-
-### Raft tuning
-
-Consul uses the [Raft consensus algorithm](/consul/docs/architecture/consensus) to provide consistency.
-You may need to adjust Raft to suit your specific environment. Adjust the [`raft_multiplier` configuration](/consul/docs/agent/config/config-files#raft_multiplier) to define the trade-off between leader stability and time to recover from a leader failure.
-
-- A lower multiplier minimizes failure detection and election time, but it may trigger frequently in high latency situations.
-- A higher multiplier reduces the chances that failures cause leadership churn, but your cluster takes longer to detect real failures and restore availability.
-
-The value of `raft_multiplier` has a default value of 5. It is a scaling factor setting that directly affects the following parameters:
-
-| Parameter name | Default value | Derived from |
-| --- | --- | --- |
-| HeartbeatTimeout | 5000ms | 5 x 1000ms |
-| ElectionTimeout | 5000ms | 5 x 1000ms |
-| LeaderLeaseTimeout | 2500ms | 5 x 500ms |
-
-You can use the telemetry from [`consul.raft.leader.lastContact`](/consul/docs/agent/telemetry#leadership-changes) to observe Raft timing performance.
-
-Wide networks with more latency perform better with larger values of `raft_multiplier`, but cluster failure detection will take longer. If your network operates with low latency, we recommend that you do not set the Raft multiplier higher than 5. Instead, you should either replace the servers with more powerful ones or minimize the network latency between nodes.
-
-We recommend you start from a baseline and perform [chaos engineering testing](/consul/tutorials/resiliency/introduction-chaos-engineering?in=consul%2Fresiliency) with different values for the Raft multiplier to find the acceptable time for problem detection and recovery for the cluster. Then scale the cluster and its dedicated resources with the number of workloads handled. This approach gives you the best balance between pure resource growth and pure Raft tuning strategies because it lets you use Raft tuning as a backup plan if you cannot scale your resources.
-
-The types of workloads the Consul cluster handles also play an important role in Raft tuning. For example, if your Consul clusters are mostly static and do not handle many events, you should increase your Raft multiplier instead of scaling your resources because the risk of an important event happening while the cluster is converging or re-electing a leader is lower.
diff --git a/website/content/docs/architecture/catalog.mdx b/website/content/docs/architecture/catalog.mdx
deleted file mode 100644
index dad1ef9aceb7..000000000000
--- a/website/content/docs/architecture/catalog.mdx
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: docs
-page_title: v1 Catalog API
-description: Learn about version 1 of the Consul catalog, including what Consul servers record when they register a service.
----
-
-# v1 Catalog API
-
-This topic provides conceptual information about version 1 (v1) of the Consul catalog API. The catalog tracks registered services and their locations for both service discovery and service mesh use cases.
-
-For more information about the information returned when querying the catalog, including filtering options when querying the catalog for a list of nodes, services, or gateways, refer to the [`/catalog` endpoint reference in the HTTP API documentation](/consul/api-docs/catalog).
-
-## Introduction
-
-Consul tracks information about registered services through its catalog API. This API records user-defined information about the external services, such as their partitions and required health checks. It also records information that Consul assigns for its own operations, such as an ID for each service instance and the [Raft indices](/consul/docs/architecture/consensus) when the instance is registered and modified.
-
-### v2 Catalog
-
-Consul introduced an experimental v2 Catalog API in v1.17.0. This API supported multi-port Service configurations on Kubernetes, and it was made available for testing and development purposes. The v2 catalog and its support for multi-port Kubernetes Services were deprecated in the v1.19.0 release.
-
-## Catalog structure
-
-When Consul registers a service instance using the v1 catalog API, it records the following information about each instance:
-
-| v1 Catalog field | Description | Source |
-| :--------------- | :---------- | :----- |
-| ID | A unique identifier for a service instance. | Defined by user in [service definition](/consul/docs/services/configuration/services-configuration-reference#id). |
-| Node | The connection point where the service is available. | On VMs, defined by user. On Kubernetes, computed by Consul according to [Kubernetes Nodes](https://kubernetes.io/docs/concepts/architecture/nodes/). |
-| Address | The registered address of the service instance. | Defined by user in [service definition](/consul/docs/services/configuration/services-configuration-reference#address). |
-| Tagged Addresses | User-defined labels for addresses. | Defined by user in [service definition](/consul/docs/services/configuration/services-configuration-reference#tagged_addresses). |
-| NodeMeta | User-defined metadata about the node. | Defined by user |
-| Datacenter | The name of the datacenter the service is registered in. | Defined by user |
-| Service | The name of the service Consul registers the service instance under. | Defined by user |
-| Agent Check | The health checks defined for a service instance managed by a Consul client agent. | Computed by Consul |
-| Health Checks | The health checks defined for the service. Refer to [define health checks](/consul/docs/services/usage/checks) for more information. | Defined by user |
-| Partition | The name of the admin partition the service is registered in. Refer to [admin partitions](/consul/docs/enterprise/admin-partitions) for more information. | Defined by user |
-| Locality | Region and availability zone of the service. Refer to [`locality`](/consul/docs/agent/config/config-files#locality) for more information. | Defined by user |
-
-Depending on the configuration entries or custom resource definitions you apply to your Consul installation, additional information such as [proxy default behavior](/consul/docs/connect/config-entries/proxy-defaults) is automatically recorded to the catalog for services. You can return this information using the [`/catalog` HTTP API endpoint](/consul/api-docs/catalog).
diff --git a/website/content/docs/architecture/consensus.mdx b/website/content/docs/architecture/consensus.mdx
deleted file mode 100644
index 9e10a05e571d..000000000000
--- a/website/content/docs/architecture/consensus.mdx
+++ /dev/null
@@ -1,223 +0,0 @@
----
-layout: docs
-page_title: Consensus Protocol | Raft
-description: >-
- Consul ensures a consistent state using the Raft protocol. A quorum, or a majority of server agents with one leader, agree to state changes before committing to the state log. Learn how Raft works in Consul to ensure state consistency and how that state can be read with different consistency modes to balance read latency and consistency.
----
-
-# Consensus Protocol
-
-Consul uses a [consensus protocol](https://en.wikipedia.org/wiki/Consensus_%28computer_science%29)
-to provide [Consistency (as defined by CAP)](https://en.wikipedia.org/wiki/CAP_theorem).
-The consensus protocol is based on
-["Raft: In search of an Understandable Consensus Algorithm"](https://raft.github.io/raft.pdf).
-For a visual explanation of Raft, see [The Secret Lives of Data](http://thesecretlivesofdata.com/raft).
-
-## Raft Protocol Overview
-
-Raft is a consensus algorithm that is based on
-[Paxos](https://en.wikipedia.org/wiki/Paxos_%28computer_science%29). Compared
-to Paxos, Raft is designed to have fewer states and a simpler, more
-understandable algorithm.
-
-There are a few key terms to know when discussing Raft:
-
-- Log - The primary unit of work in a Raft system is a log entry. The problem
- of consistency can be decomposed into a _replicated log_. A log is an ordered
- sequence of entries. Entries include any cluster change: adding nodes, adding services, new key-value pairs, etc. We consider the log consistent
- if all members agree on the entries and their order.
-
-- FSM - [Finite State Machine](https://en.wikipedia.org/wiki/Finite-state_machine).
- An FSM is a collection of finite states with transitions between them. As new logs
- are applied, the FSM is allowed to transition between states. Application of the
- same sequence of logs must result in the same state, meaning behavior must be deterministic.
-
-- Peer set - The peer set is the set of all members participating in log replication.
- For Consul's purposes, all server nodes are in the peer set of the local datacenter.
-
-- Quorum - A quorum is a majority of members from a peer set: for a set of size `N`,
- quorum requires at least `(N/2)+1` members.
- For example, if there are 5 members in the peer set, we would need 3 nodes
- to form a quorum. If a quorum of nodes is unavailable for any reason, the
- cluster becomes _unavailable_ and no new logs can be committed.
-
-- Committed Entry - An entry is considered _committed_ when it is durably stored
- on a quorum of nodes. Once an entry is committed it can be applied.
-
-- Leader - At any given time, the peer set elects a single node to be the leader.
- The leader is responsible for ingesting new log entries, replicating to followers,
- and managing when an entry is considered committed.
-
-Raft is a complex protocol and will not be covered here in detail (for those who
-desire a more comprehensive treatment, the full specification is available in this
-[paper](https://raft.github.io/raft.pdf)).
-We will, however, attempt to provide a high level description which may be useful
-for building a mental model.
-
-Raft nodes are always in one of three states: follower, candidate, or leader. All
-nodes initially start out as a follower. In this state, nodes can accept log entries
-from a leader and cast votes. If no entries are received for some time, nodes
-self-promote to the candidate state. In the candidate state, nodes request votes from
-their peers. If a candidate receives a quorum of votes, then it is promoted to a leader.
-The leader must accept new log entries and replicate to all the other followers.
-In addition, if stale reads are not acceptable, all queries must also be performed on
-the leader.
-
-Once a cluster has a leader, it is able to accept new log entries. A client can
-request that a leader append a new log entry (from Raft's perspective, a log entry
-is an opaque binary blob). The leader then writes the entry to durable storage and
-attempts to replicate to a quorum of followers. Once the log entry is considered
-_committed_, it can be _applied_ to a finite state machine. The finite state machine
-is application specific; in Consul's case, we use
-[MemDB](https://github.com/hashicorp/go-memdb) to maintain cluster state. Consul's writes
-block until it is both _committed_ and _applied_. This achieves read after write semantics
-when used with the [consistent](/consul/api-docs/features/consistency#consistent) mode for queries.
-
-Obviously, it would be undesirable to allow a replicated log to grow in an unbounded
-fashion. Raft provides a mechanism by which the current state is snapshotted and the
-log is compacted. Because of the FSM abstraction, restoring the state of the FSM must
-result in the same state as a replay of old logs. This allows Raft to capture the FSM
-state at a point in time and then remove all the logs that were used to reach that
-state. This is performed automatically without user intervention and prevents unbounded
-disk usage while also minimizing time spent replaying logs. One of the advantages of
-using MemDB is that it allows Consul to continue accepting new transactions even while
-old state is being snapshotted, preventing any availability issues.
-
-Consensus is fault-tolerant up to the point where quorum is available.
-If a quorum of nodes is unavailable, it is impossible to process log entries or reason
-about peer membership. For example, suppose there are only 2 peers: A and B. The quorum
-size is also 2, meaning both nodes must agree to commit a log entry. If either A or B
-fails, it is now impossible to reach quorum. This means the cluster is unable to add
-or remove a node or to commit any additional log entries. This results in
-_unavailability_. At this point, manual intervention would be required to remove
-either A or B and to restart the remaining node in bootstrap mode.
-
-A Raft cluster of 3 nodes can tolerate a single node failure while a cluster
-of 5 can tolerate 2 node failures. The recommended configuration is to either
-run 3 or 5 Consul servers per datacenter. This maximizes availability without
-greatly sacrificing performance. The [deployment table](#deployment_table) below
-summarizes the potential cluster size options and the fault tolerance of each.
-
-In terms of performance, Raft is comparable to Paxos. Assuming stable leadership,
-committing a log entry requires a single round trip to half of the cluster.
-Thus, performance is bound by disk I/O and network latency. Although Consul is
-not designed to be a high-throughput write system, it should handle on the order
-of hundreds to thousands of transactions per second depending on network and
-hardware configuration.
-
-## Raft in Consul
-
-Only Consul server nodes participate in Raft and are part of the peer set. All
-client nodes forward requests to servers. Part of the reason for this design is
-that, as more members are added to the peer set, the size of the quorum also increases.
-This introduces performance problems as you may be waiting for hundreds of machines
-to agree on an entry instead of a handful.
-
-When getting started, a single Consul server is put into "bootstrap" mode. This mode
-allows it to self-elect as a leader. Once a leader is elected, other servers can be
-added to the peer set in a way that preserves consistency and safety. Eventually,
-once the first few servers are added, bootstrap mode can be disabled. See [this
-document](/consul/docs/install/bootstrapping) for more details.
-
-Since all servers participate as part of the peer set, they all know the current
-leader. When an RPC request arrives at a non-leader server, the request is
-forwarded to the leader. If the RPC is a _query_ type, meaning it is read-only,
-the leader generates the result based on the current state of the FSM. If
-the RPC is a _transaction_ type, meaning it modifies state, the leader
-generates a new log entry and applies it using Raft. Once the log entry is committed
-and applied to the FSM, the transaction is complete.
-
-Because of the nature of Raft's replication, performance is sensitive to network
-latency. For this reason, each datacenter elects an independent leader and maintains
-a disjoint peer set. Data is partitioned by datacenter, so each leader is responsible
-only for data in their datacenter. When a request is received for a remote datacenter,
-the request is forwarded to the correct leader. This design allows for lower latency
-transactions and higher availability without sacrificing consistency.
-
-## Consistency Modes
-
-Although all writes to the replicated log go through Raft, reads are more
-flexible. To support various trade-offs that developers may want, Consul
-supports 3 different consistency modes for reads.
-
-The three read modes are:
-
-- `default` - Raft makes use of leader leasing, providing a time window
- in which the leader assumes its role is stable. However, if a leader
- is partitioned from the remaining peers, a new leader may be elected
- while the old leader is holding the lease. This means there are 2 leader
- nodes. There is no risk of a split-brain since the old leader will be
- unable to commit new logs. However, if the old leader services any reads,
- the values are potentially stale. The default consistency mode relies only
- on leader leasing, exposing clients to potentially stale values. We make
- this trade-off because reads are fast, usually strongly consistent, and
- only stale in a hard-to-trigger situation. The time window of stale reads
- is also bounded since the leader will step down due to the partition.
-
-- `consistent` - This mode is strongly consistent without caveats. It requires
- that a leader verify with a quorum of peers that it is still leader. This
- introduces an additional round-trip to all server nodes. The trade-off is
- always consistent reads but increased latency due to the extra round trip.
-
-- `stale` - This mode allows any server to service the read regardless of whether
- it is the leader. This means reads can be arbitrarily stale but are generally
- within 50 milliseconds of the leader. The trade-off is very fast and scalable
- reads but with stale values. This mode allows reads without a leader meaning
- a cluster that is unavailable will still be able to respond.
-
-For more documentation about using these various modes, see the
-[HTTP API](/consul/api-docs/features/consistency).
-
-## Deployment Table ((#deployment_table))
-
-Below is a table that shows quorum size and failure tolerance for various
-cluster sizes. The recommended deployment is either 3 or 5 servers. A single
-server deployment is _**highly**_ discouraged as data loss is inevitable in a
-failure scenario.
-
-| Servers | Quorum Size | Failure Tolerance |
-| ------- | ----------- | ----------------- |
-| 1       | 1           | 0                 |
-| 2       | 2           | 0                 |
-| 3       | 2           | 1                 |
-| 4       | 3           | 1                 |
-| 5       | 3           | 2                 |
-| 6       | 4           | 2                 |
-| 7       | 4           | 3                 |
-
diff --git a/website/content/docs/architecture/control-plane/dataplane.mdx b/website/content/docs/architecture/control-plane/dataplane.mdx
new file mode 100644
index 000000000000..dfaaa142fa3e
--- /dev/null
+++ b/website/content/docs/architecture/control-plane/dataplane.mdx
@@ -0,0 +1,163 @@
+---
+layout: docs
+page_title: Consul dataplane
+description: >-
+ Consul Dataplane removes the need to run a client agent for service discovery and service mesh by leveraging orchestrator functions. Learn about Consul Dataplane, how it can lower latency for Consul on Kubernetes and AWS ECS, and how it enables Consul support for AWS Fargate and GKE Autopilot.
+---
+
+# Consul dataplane
+
+This topic provides an overview of Consul dataplane, a lightweight process for managing Envoy proxies. Consul dataplanes remove the need to run client agents on every node in a cluster for service discovery and service mesh. Instead, Consul deploys sidecar proxies that provide lower latency, support additional runtimes, and integrate with cloud infrastructure providers.
+
+## Supported environments
+
+- Dataplanes can connect to Consul servers v1.14.0 and newer.
+- Dataplanes on Kubernetes require Consul K8s v1.0.0 and newer.
+- Dataplanes on AWS Elastic Container Services (ECS) require Consul ECS v0.7.0 and newer.
+
+## What is Consul dataplane?
+
+When deployed to virtual machines or bare metal environments, the Consul control plane requires _server agents_ and _client agents_. Server agents maintain the service catalog and service mesh, including its security and consistency, while client agents manage communications between service instances, their sidecar proxies, and the servers. While this model is optimal for applications deployed on virtual machines or bare metal servers, orchestrators such as Kubernetes and ECS have native components that support health checking and service location functions typically provided by the client agent.
+
+Consul dataplane manages Envoy proxies and leaves responsibility for other functions to the orchestrator. As a result, it removes the need to run client agents on every node. In addition, services no longer need to be reregistered to a local client agent after restarting a service instance, as a client agent’s lack of access to persistent data storage in container-orchestrated deployments is no longer an issue.
+
+The following diagram shows how Consul dataplanes facilitate service mesh in a Kubernetes-orchestrated environment.
+
+
+
+### Impact on performance
+
+Consul dataplanes replace node-level client agents and function as sidecars attached to each service instance. Dataplanes handle communication between Consul servers and Envoy proxies, using fewer resources than client agents. Consul servers need to consume additional resources in order to generate xDS resources for Envoy proxies.
+
+As a result, small deployments require fewer overall resources. For especially large deployments or deployments that expect to experience high levels of churn, consider the following impacts to your network's performance:
+
+1. In our internal tests, which used 5000 proxies and services flapping every 2 seconds, additional CPU utilization remained under 10% on the control plane.
+1. As you deploy more services, the resource usage for dataplanes grows on a linear scale.
+1. Envoy reconfigurations are rate limited to prevent excessive configuration changes from generating significant load on the servers.
+1. To avoid generating significant load on an individual server, proxy configuration is load balanced proactively.
+1. The frequency of the orchestrator's liveness and readiness probes determine how quickly Consul's control plane can become aware of failures. There is no impact on service mesh applications, however, as Envoy proxies have a passive ability to detect endpoint failure and steer traffic to healthy instances.
+
+## Benefits
+
+**Fewer networking requirements**: Without client agents, Consul does not require bidirectional network connectivity across multiple protocols to enable gossip communication. Instead, it requires a single gRPC connection to the Consul servers, which significantly simplifies requirements for the operator.
+
+**Simplified set up**: Because there are no client agents to engage in gossip, you do not have to generate and distribute a gossip encryption key to agents during the initial bootstrapping process. Securing agent communication also becomes simpler, with fewer tokens to track, distribute, and rotate.
+
+**Additional environment and runtime support**: Consul on Kubernetes versions _prior_ to v1.0 (Consul v1.14) require the use of hostPorts and DaemonSets for client agents, which limits Consul’s ability to be deployed in environments where those features are not supported.
+As of Consul on Kubernetes version 1.0 (Consul 1.14), `hostPorts` are no longer required and Consul now supports AWS Fargate and GKE Autopilot.
+
+**Easier upgrades**: With Consul dataplane, updating Consul to a new version no longer requires upgrading client agents. Consul Dataplane also has better compatibility across Consul server versions, so the process to upgrade Consul servers becomes easier.
+
+## Get started
+
+To get started with Consul dataplane, use the following reference resources:
+
+- For `consul-dataplane` commands and usage examples, including required flags for startup, refer to the [`consul-dataplane` CLI reference](/consul/docs/reference/dataplane/cli).
+- For Helm chart information, refer to the [Helm Chart reference](/consul/docs/reference/k8s/helm).
+- For Envoy, Consul, and Consul Dataplane version compatibility, refer to the [Envoy compatibility matrix](/consul/docs/reference/proxy/envoy).
+- For Consul on ECS workloads, refer to [Consul on AWS Elastic Container Service (ECS) Overview](/consul/docs/ecs).
+
+## Installation
+
+
+
+
+
+To install Consul dataplane, set `VERSION` to `1.0.0` or higher and then follow the instructions to install a specific version of Consul [with the Helm Chart](/consul/docs/k8s/installation/install#install-consul) or [with the Consul-k8s CLI](/consul/docs/k8s/installation/install-cli#install-a-previous-version).
+
+### Helm
+
+```shell-session
+$ export VERSION=1.0.0
+$ helm install consul hashicorp/consul --set global.name=consul --version ${VERSION} --create-namespace --namespace consul
+```
+
+### Consul-k8s CLI
+
+```shell-session
+$ export VERSION=1.0.0 && \
+ curl --location "https://releases.hashicorp.com/consul-k8s/${VERSION}/consul-k8s_${VERSION}_darwin_amd64.zip" --output consul-k8s-cli.zip
+```
+
+
+
+
+Refer to the following documentation for Consul on ECS workloads:
+
+- [Deploy Consul with the Terraform module](/consul/docs/register/service/ecs/)
+- [Deploy Consul manually](/consul/docs/register/service/ecs/manual)
+
+
+
+
+
+### Namespace ACL permissions
+
+If ACLs are enabled, exported services between partitions that use dataplanes may experience errors when you define namespace partitions with the `*` wildcard. Consul dataplanes use a token with the `builtin/service` policy attached, but this policy does not include access to all namespaces.
+
+Add the following policies to the service token attached to Consul dataplanes to grant Consul access to exported services across all namespaces:
+
+```hcl
+partition "default" {
+ namespace "default" {
+ query_prefix "" {
+ policy = "read"
+ }
+ }
+}
+
+partition_prefix "" {
+ namespace_prefix "" {
+ node_prefix "" {
+ policy = "read"
+ }
+ service_prefix "" {
+ policy = "read"
+ }
+ }
+}
+```
+
+## Upgrade dataplane version
+
+
+
+
+
+Before you upgrade Consul to a version that uses Consul dataplane, you must edit your Helm chart so that client agents are removed from your deployments. Refer to [upgrading to Consul Dataplane](/consul/docs/k8s/upgrade#upgrading-to-consul-dataplanes) for more information.
+
+
+
+
+
+Refer to [Upgrade to dataplane architecture](/consul/docs/upgrade/ecs/dataplane) for instructions.
+
+
+
+
+
+## Feature support
+
+Consul dataplanes on Kubernetes support the following features:
+
+- Single and multi-cluster installations, including those with WAN federation, cluster peering, and admin partitions are supported.
+- Ingress, terminating, and mesh gateways are supported.
+- Running Consul service mesh in AWS Fargate and GKE Autopilot is supported.
+- xDS load balancing is supported.
+- Servers running in Kubernetes and servers external to Kubernetes are both supported.
+- HCP Consul Dedicated is supported.
+- Consul API Gateway is supported.
+
+Consul dataplanes on ECS support the following features:
+
+- Single and multi-cluster installations, including those with WAN federation, cluster peering, and admin partitions
+- Mesh gateways
+- Running Consul service mesh in AWS Fargate and EC2
+- xDS load balancing
+- Self-managed Enterprise and HCP Consul Dedicated servers
+
+## Technical constraints and limitations
+
+- Consul Dataplane is not supported on Windows.
+- Consul Dataplane requires the `NET_BIND_SERVICE` capability. Refer to [Set capabilities for a Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container) in the Kubernetes Documentation for more information.
+- When ACLs are enabled, dataplanes use the [service token](/consul/docs/security/acl/tokens/create/create-a-service-token) and the `builtin/service` policy for their default permissions.
diff --git a/website/content/docs/architecture/control-plane/index.mdx b/website/content/docs/architecture/control-plane/index.mdx
new file mode 100644
index 000000000000..6c0e54bb5714
--- /dev/null
+++ b/website/content/docs/architecture/control-plane/index.mdx
@@ -0,0 +1,115 @@
+---
+layout: docs
+page_title: Consul control plane architecture
+description: >-
+ Consul datacenters consist of clusters of server agents (control plane) and client agents deployed alongside service instances (data plane). Learn how these components and their different communication methods make Consul possible.
+---
+
+# Consul control plane architecture
+
+This topic provides an overview of the Consul architecture. We recommend reviewing the [Consul glossary](/consul/docs/glossary) as a companion to this topic to help you become familiar with HashiCorp terms.
+
+> Refer to the [Reference Architecture tutorial](/consul/tutorials/production-deploy/reference-architecture) for hands-on guidance about deploying Consul in production.
+
+## Introduction
+
+Consul provides a control plane that enables you to register, access, and secure services deployed across your network. The _control plane_ is the part of the network infrastructure that maintains a central registry to track services and their respective IP addresses.
+
+When using Consul’s service mesh capabilities, Consul dynamically configures sidecar and gateway proxies in the request path, which enables you to authorize service-to-service connections, route requests to healthy service instances, and enforce mTLS encryption without modifying your service’s code. This ensures that communication remains performant and reliable. Refer to [Service Mesh Proxy Overview](/consul/docs/connect/proxy) for an overview of sidecar proxies.
+
+
+
+## Datacenters
+
+@include 'text/descriptions/datacenter.mdx'
+
+### Clusters
+
+@include 'text/descriptions/cluster.mdx'
+
+## Agents
+
+You can run the Consul binary to start Consul _agents_, which are daemons that implement Consul control plane functionality. You can start agents as servers or clients. Refer to [Consul agent](/consul/docs/fundamentals/agent) for additional information.
+
+### Server agents
+
+Consul server agents store all state information, including service and node IP addresses, health checks, and configuration. We recommend deploying three or five servers in a cluster. The more servers you deploy, the greater the resilience and availability in the event of a failure. More servers, however, slow down cluster consensus, which is a critical server function that enables Consul to efficiently and effectively process information.
+
+#### Consensus protocol
+
+Consul clusters elect a single server to be the _leader_ through a process called _consensus_. The leader processes all queries and transactions, which prevents conflicting updates in clusters containing multiple servers.
+
+Servers that are not currently acting as the cluster leader are called _followers_. Followers forward requests from client agents to the cluster leader. The leader replicates the requests to all other servers in the cluster. Replication ensures that if the leader is unavailable, other servers in the cluster can elect another leader without losing any data.
+
+Consul servers establish consensus using the Raft algorithm on port `8300`. Refer to [Consensus Protocol](/consul/docs/concept/consensus) for more information.
+
+
+
+### Client agents
+
+Consul clients report node and service health status to the Consul cluster. In a typical deployment, you must run client agents on every compute node in your datacenter. Clients use remote procedure calls (RPC) to interact with servers. By default, clients send RPC requests to the servers on port `8300`.
+
+There are no limits to the number of client agents or services you can use with Consul, but production deployments should distribute services across multiple Consul datacenters. Using a multi-datacenter deployment enhances infrastructure resilience and limits control plane issues. We recommend deploying a maximum of 5,000 client agents per datacenter. Some large organizations have deployed tens of thousands of client agents and hundreds of thousands of service instances across a multi-datacenter deployment. Refer to [Cross-datacenter requests](#cross-datacenter-requests) for additional information.
+
+You can also run Consul with an alternate service mesh configuration that deploys Envoy proxies but not client agents. Refer to [Simplified Service Mesh with Consul Dataplanes](/consul/docs/architecture/control-plane/dataplane) for more information.
+
+## LAN gossip pool
+
+Client and server agents participate in a LAN gossip pool so that they can distribute and perform node [health checks](/consul/docs/register/health-check/vm). Agents in the pool propagate the health check information across the cluster. Agent gossip communication occurs on port `8301` using UDP. Agent gossip falls back to TCP if UDP is not available. Refer to [gossip protocol](/consul/docs/concept/gossip) for additional information.
+
+The following simplified diagram shows the interactions between servers and clients.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## Cross-datacenter requests
+
+Each Consul datacenter maintains its own catalog of services and their health. By default, the information is not replicated across datacenters. WAN federation and cluster peering are two multi-datacenter deployment models that enable service connectivity across datacenters.
+
+### WAN federation
+
+WAN federation is an approach for connecting multiple Consul datacenters. It requires you to designate a _primary datacenter_ that contains authoritative information about all datacenters, including service mesh configurations and access control list (ACL) resources.
+
+In this model, when a client agent requests a resource in a remote secondary datacenter, a local Consul server forwards the RPC request to a remote Consul server that has access to the resource. A remote server sends the results to the local server. If the remote datacenter is unavailable, its resources are also unavailable. By default, WAN-federated servers send cross-datacenter requests over TCP on port `8300`.
+
+You can configure control plane and data plane traffic to go through mesh gateways, which simplifies networking requirements.
+
+> **Hands-on**: To enable services to communicate across datacenters when the ACL system is enabled, refer to the [ACL Replication for Multiple Datacenters](/consul/tutorials/security-operations/access-control-replication-multiple-datacenters) tutorial.
+
+#### WAN gossip pool
+
+Servers may also participate in a WAN gossip pool, which is optimized for greater latency imposed by the Internet. The pool enables servers to exchange information, such as their addresses and health, and gracefully handle loss of connectivity in the event of a failure.
+
+In the following diagram, the servers in each data center participate in a WAN gossip pool by sending data over TCP/UDP on port `8302`. Refer to [Gossip Protocol](/consul/docs/concept/gossip) for additional information.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Cluster peering
+
+You can create peering connections between two or more independent clusters so that services deployed to different datacenters or admin partitions can communicate. An [admin partition](/consul/docs/multi-tenant/admin-partition) is a feature in Consul Enterprise that enables you to define isolated network regions that use the same Consul servers. In the cluster peering model, you create a token in one of the datacenters or partitions and configure another datacenter or partition to present the token to establish the connection.
+
+Refer to [cluster peering overview](/consul/docs/east-west/cluster-peering) for
+additional information.
diff --git a/website/content/docs/architecture/control-plane/k8s.mdx b/website/content/docs/architecture/control-plane/k8s.mdx
new file mode 100644
index 000000000000..4c18c1c2cd2b
--- /dev/null
+++ b/website/content/docs/architecture/control-plane/k8s.mdx
@@ -0,0 +1,40 @@
+---
+layout: docs
+page_title: Consul on Kubernetes architecture
+description: >-
+ When running on Kubernetes, Consul’s control plane architecture does not change significantly. Server agents are deployed as a StatefulSet with a persistent volume, while client agents can run as a k8s DaemonSet with an exposed API port or be omitted with Consul Dataplanes.
+---
+
+# Consul on Kubernetes architecture
+
+This topic describes the architecture, components, and resources associated with Consul deployments to Kubernetes. Consul employs the same architectural design on Kubernetes as it does with other platforms, but Kubernetes provides additional benefits that make operating a Consul cluster easier. Refer to [Consul control plane architecture](/consul/docs/architecture/control-plane) for more general information on Consul's architecture.
+
+> **More specific guidance:**
+> - For guidance on datacenter design, refer to [Consul and Kubernetes Reference Architecture](/consul/tutorials/kubernetes-production/kubernetes-reference-architecture).
+> - For step-by-step deployment guidance, refer to [Consul and Kubernetes Deployment Guide](/consul/tutorials/kubernetes-production/kubernetes-deployment-guide).
+> - For non-Kubernetes guidance, refer to the standard [production deployment guide](/consul/tutorials/production-deploy/deployment-guide).
+
+## Server agents on Kubernetes
+
+The server agents are deployed as a `StatefulSet` and use persistent volume claims to store the server state. This state ensures that the [node ID](/consul/docs/reference/agent/configuration-file/node#node_id) is persisted so that servers can be rescheduled onto new IP addresses without causing issues.
+
+The server agents are configured with [anti-affinity rules](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) so that they are placed on different nodes. A readiness probe is also configured to mark the pod as ready only when it has established a leader.
+
+A Kubernetes `Service` is registered to represent each Consul server and Kubernetes exposes ports that are required to communicate to the Consul server pods. The servers use the DNS address of this service to join a Consul cluster, without requiring any other access to the Kubernetes cluster. Additional Consul servers may also utilize non-ready endpoints that are published by the Kubernetes Service so that servers can use the service for joining during bootstrap and upgrades.
+
+A **PodDisruptionBudget** is configured so the Consul server cluster maintains quorum during voluntary operational events. The maximum unavailable is `(n/2)-1` where `n` is the number of server agents.
+
+-> **Note:** Kubernetes and Helm do not delete Persistent Volumes or Persistent Volume Claims when a [StatefulSet is deleted](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-storage). You must perform this action manually when removing servers.
+
+## Consul dataplane on Kubernetes
+
+By default, Consul on Kubernetes uses an alternate service mesh configuration that injects sidecars without client agents. _Consul dataplane_ manages Envoy proxies and leaves responsibility for other functions to the orchestrator, which removes the need to run client agents on every node.
+
+
+
+Refer to [Simplified Service Mesh with Consul dataplanes](/consul/docs/architecture/control-plane/dataplane) for more information.
+
+Consul dataplane is the default proxy manager in Consul on Kubernetes 1.0
+(Consul 1.14) and later. If you are on Consul 1.13 or older, refer to [upgrading to Consul
+Dataplane](/consul/docs/k8s/upgrade#upgrading-to-consul-dataplanes) for specific
+upgrade instructions.
diff --git a/website/content/docs/architecture/coordinates.mdx b/website/content/docs/architecture/coordinates.mdx
index 7bc37cc9c029..eb169a3e1aff 100644
--- a/website/content/docs/architecture/coordinates.mdx
+++ b/website/content/docs/architecture/coordinates.mdx
@@ -1,11 +1,11 @@
---
layout: docs
-page_title: Network Coordinates
+page_title: Network coordinates
description: >-
Network coordinates are node locations in network tomography used to estimate round trip time (RTT). Learn how network coordinates manifest in Consul, how it calculates RTT, and how to work with coordinates to sort catalog information by nearness to a given node.
---
-# Network Coordinates
+# Network coordinates
Consul uses a [network tomography](https://en.wikipedia.org/wiki/Network_tomography)
system to compute network coordinates for nodes in the cluster. These coordinates
diff --git a/website/content/docs/architecture/cts.mdx b/website/content/docs/architecture/cts.mdx
new file mode 100644
index 000000000000..f30e56de7720
--- /dev/null
+++ b/website/content/docs/architecture/cts.mdx
@@ -0,0 +1,79 @@
+---
+layout: docs
+page_title: Architecture
+description: >-
+ Learn about the Consul-Terraform-Sync architecture and high-level CTS components, such as the Terraform driver and tasks.
+---
+
+# Consul-Terraform-Sync Architecture
+
+Consul-Terraform-Sync (CTS) is a service-oriented tool for managing network infrastructure in near real-time. CTS runs as a daemon and integrates the network topology maintained by your Consul cluster with your network infrastructure to dynamically secure and connect services.
+
+## CTS workflow
+
+The following diagram shows the CTS workflow as it monitors the Consul service catalog for updates.
+
+[](/img/nia-highlevel-diagram.svg)
+
+1. CTS monitors the state of Consul’s service catalog and its KV store. This process is described in [Watcher and views](#watcher-and-views).
+1. CTS detects a change.
+1. CTS prompts Terraform to update the state of the infrastructure.
+
+
+## Watcher and views
+
+CTS uses Consul's [blocking queries](/consul/api-docs/features/blocking) functionality to monitor Consul for updates. If an endpoint does not support blocking queries, CTS uses polling to watch for changes. These mechanisms are referred to in CTS as *watchers*.
+
+The watcher maintains a separate thread for each value monitored and runs any tasks that depend on the watched value whenever it is updated. These threads are referred to as _views_. For example, a thread may run a task to update a proxy when the watcher detects that an instance has become unhealthy.
+
+## Tasks
+
+A task is the action triggered by the updated data monitored in Consul. It
+takes that dynamic service data and translates it into a call to the
+infrastructure application to configure it with the updates. It uses a driver
+to push out these updates, the initial driver being a local Terraform run. An
+example of a task is to automate a firewall security policy rule with
+discovered IP addresses for a set of Consul services.
+
+## Drivers
+
+A driver encapsulates the resources required to communicate the updates to the
+network infrastructure. The following [drivers](/consul/docs/nia/network-drivers#terraform) are supported:
+
+- Terraform driver
+- HCP Terraform driver
+
+Each driver includes a set of providers that [enables support](/consul/docs/automate/infrastructure/module) for a wide variety of infrastructure applications.
+
+## State storage and persistence
+
+The following types of state information are associated with CTS.
+
+### Terraform state information
+
+By default, CTS stores [Terraform state data](/terraform/language/state) in the Consul KV, but you can specify where this information is stored by configuring the `backend` setting in the [Terraform driver configuration](/consul/docs/nia/configuration#backend). The data persists if CTS stops and the backend is configured to a remote location.
+
+### CTS task and event data
+
+By default, CTS stores task and event data in memory. This data is transient and does not persist. If you configure [CTS to run with high availability enabled](/consul/docs/automate/infrastructure/high-availability), CTS stores the data in the Consul KV. High availability is an enterprise feature that promotes CTS resiliency. When high availability is enabled, CTS stores and persists task changes and events that occur when an instance stops.
+
+The data stored when operating in high availability mode includes task changes made using the task API or CLI. Examples of task changes include creating a new task, deleting a task, and enabling or disabling a task. You can empty the leader’s stored state information by starting CTS with the [`-reset-storage` flag](/consul/docs/nia/cli/start#options).
+
+## Instance compatibility checks (high availability)
+
+If you [run CTS with high availability enabled](/consul/docs/automate/infrastructure/high-availability), CTS performs instance compatibility checks to ensure that all instances in the cluster behave consistently. Consistent instance behavior enables CTS to properly perform automations configured in the state storage.
+
+The CTS instance compatibility check reports an error if the task [module](/consul/docs/nia/configuration#module) is configured with a local module, but the module does not exist on the CTS instance. Refer to the [Terraform documentation](/terraform/language/modules/sources#module-sources) for additional information about module sources. Example log:
+
+```shell-session
+[ERROR] ha.compat: error="compatibility check failure: stat ./example-module: no such file or directory"
+```
+Refer to [Error Messages](/consul/docs/error-messages/cts) for additional information.
+
+CTS instances perform a compatibility check on start-up based on the stored state and every five minutes after starting. If the check detects an incompatible CTS instance, it generates a log so that an operator can address it.
+
+CTS logs the error message and continues to run when it finds an incompatibility. CTS can still elect an incompatible instance to be the leader, but tasks affected by the incompatibility do not run successfully. This can happen when all active CTS instances enter [`once-mode`](/consul/docs/nia/cli/start#modes) and run the tasks once when initially elected.
+
+## Security guidelines
+
+We recommend following the network security guidelines described in the [Secure Consul-Terraform-Sync for Production](/consul/tutorials/network-infrastructure-automation/consul-terraform-sync-secure?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial. The tutorial contains a checklist of best practices to secure your CTS installation for a production environment.
\ No newline at end of file
diff --git a/website/content/docs/architecture/data-plane/connect.mdx b/website/content/docs/architecture/data-plane/connect.mdx
new file mode 100644
index 000000000000..9816910a743a
--- /dev/null
+++ b/website/content/docs/architecture/data-plane/connect.mdx
@@ -0,0 +1,74 @@
+---
+layout: docs
+page_title: Consul service mesh
+description: >-
+ Consul's service mesh enforces secure service communication using mutual TLS (mTLS) encryption and explicit authorization. Learn how service mesh certificate authorities, intentions, and agents work together to provide Consul's service mesh capabilities.
+---
+
+# Consul service mesh
+
+This topic describes how the core features of Consul's service mesh work.
+
+This document uses _connect_ to refer to the subsystem that provides Consul's service mesh capabilities. We use this word because you define the service mesh capabilities in the `connect` stanza of Consul and Nomad agent configurations.
+
+## Mutual transport layer security (mTLS)
+
+The core of Consul service mesh is based on [mutual TLS](https://en.wikipedia.org/wiki/Mutual_authentication).
+
+Consul service mesh secures service-to-service communication using TLS certificates for identity. These certificates comply with the [SPIFFE X.509 standard](https://github.com/spiffe/spiffe/blob/master/standards/X509-SVID.md), ensuring interoperability with other SPIFFE-compliant systems. Consul includes a built-in certificate authority (CA) for generating and distributing these certificates, and also integrates with [Vault](/consul/docs/secure-mesh/certificate/vault). Consul's PKI system is designed to be extendable to support any system by adding CA providers.
+
+During the connection attempt, the client service first verifies the destination service's certificate using the [public CA bundle](/consul/api-docs/connect/ca#list-ca-root-certificates). The client also presents its own certificate to authenticate its identity to the destination service. The destination service, in turn, verifies the client's certificate against the same public CA bundle. If this mutual certificate validation is successful, an encrypted and authenticated TLS connection is established.
+
+Once the secure connection is in place, the destination service proceeds with authorization based on its configured application protocol:
+
+- TCP (L4) services must authorize _incoming connections_ against the configured set of [service intentions](/consul/docs/secure-mesh/intention).
+- HTTP (L7) services must authorize _incoming requests_ against those same intentions.
+
+If the intention check is successful, the connection (for TCP) or the specific request (for HTTP) is permitted. Otherwise, it is rejected.
+
+All APIs required for Consul service mesh typically respond in microseconds and impose minimal overhead to existing services. To ensure this, Consul service mesh-related API calls
+are all made to the local Consul agent over a loopback interface, and all [agent `/connect` endpoints](/consul/api-docs/agent/connect) implement local caching, background
+updating, and support blocking queries. Most API calls operate on purely local in-memory data.
+
+## Agent caching and performance
+
+To enable fast responses on endpoints such as the [agent connect API](/consul/api-docs/agent/connect), the Consul agent locally caches most Consul service mesh-related
+data and sets up background [blocking queries](/consul/api-docs/features/blocking) against the server to update the cache in the background. This setup allows most API calls to use in-memory data and respond quickly.
+
+All data cached locally by the agent is populated on demand. Therefore, if Consul service mesh is not used at all, the cache does not store any data. On first request, the following data is loaded from the server and cached:
+
+- public CA root certificates
+- leaf certificates
+- service intentions
+- service discovery results for upstreams
+
+For leaf certificates and service intentions, the agent only caches data related to the service requested, not the full set of data.
+
+The cache is partitioned by ACL token and datacenters. This partition minimizes the complexity of the cache and prevents an ACL token from accessing data it should not have access to. This partition results in higher memory usage for cached data since it is duplicated per ACL token.
+
+With Consul service mesh enabled, you are likely to observe increased memory usage by the local Consul agent. Memory usage scales with the number of service intentions associated with the registered services on the agent. The other data, including leaf certificates and public CA certificates, is a relatively fixed size per service. In most cases, the overhead per service should be relatively small and measure in single-digit kilobytes at most.
+
+The cache does not evict entries due to memory pressure. If memory capacity is reached, the process will attempt to swap. If swap is disabled, the Consul agent may begin failing and eventually crash. Each cache entry has a default time-to-live (TTL) of 3 days and is automatically removed if not accessed during that period.
+
+## Connections across datacenters
+
+A [sidecar proxy's upstream configuration](/consul/docs/connect/proxies/proxy-config-reference#upstream-configuration-reference) may specify an alternative datacenter or a prepared query that can address services in multiple datacenters.
+
+[Service intentions](/consul/docs/secure-mesh/intention) verify connections between services by source and destination name seamlessly across datacenters.
+
+You can make connections with gateways to enable communication across network topologies, which enables connections between services in each datacenter without externally routable IPs at the service level.
+
+### Service intention replication
+
+You can specify a datacenter that is authoritative for intentions by setting the [`primary_datacenter`](/consul/docs/reference/agent/configuration-file#primary_datacenter) configuration. When you do this, Consul automatically replicates intentions from the primary datacenter to the secondary datacenters.
+
+In production setups with ACLs enabled, you must also set the [replication token](/consul/docs/reference/agent/configuration-file#acl_tokens_replication) in the secondary datacenter server's configuration.
+
+### Certificate authority federation
+
+The primary datacenter also acts as the root certificate authority (CA) for Consul service mesh. The primary datacenter generates a trust-domain UUID and obtains a root certificate
+from the configured CA provider which defaults to the built-in one.
+
+Secondary datacenters retrieve the root CA public key and trust-domain ID from the primary datacenter. They then create their own private key and generate a certificate signing request (CSR) to obtain an intermediate CA certificate. The primary datacenter's root CA signs this CSR and returns the signed intermediate certificate. With this intermediate certificate in place, the secondary datacenter can independently issue new certificates for its Consul service mesh without requiring WAN communication to the primary. For security, private CA keys remain isolated within their respective datacenters and are never shared between them.
+
+Secondary datacenters continuously monitor the root CA certificate in the primary datacenter. When the primary's root CA changes, whether due to planned rotation or CA migration, the secondary datacenter automatically generates new keys, gets them signed by the primary's updated root CA, and then systematically rotates all issued certificates within the secondary datacenter. This makes CA root key rotation fully automatic with zero downtime across multiple datacenters.
diff --git a/website/content/docs/architecture/data-plane/gateway.mdx b/website/content/docs/architecture/data-plane/gateway.mdx
new file mode 100644
index 000000000000..9f47b74b341a
--- /dev/null
+++ b/website/content/docs/architecture/data-plane/gateway.mdx
@@ -0,0 +1,81 @@
+---
+layout: docs
+page_title: Gateways
+description: >-
+ Gateways are proxies that direct traffic into, out of, and inside of Consul's service mesh. They secure communication with external or non-mesh network resources and enable services on different runtimes, cloud providers, or with overlapping IP addresses to communicate with each other.
+---
+
+# Gateways
+
+This topic provides an overview of the gateway features shipped with Consul. Gateways provide connectivity into, out of, and between Consul service meshes. You can configure the following types of gateways:
+
+- [API gateways](/consul/docs/north-south/api-gateway) handle and secure incoming requests from external clients, routing them to services within the mesh. They offer advanced Layer 7 features like authentication and routing.
+- [Ingress gateways](#ingress-gateways) (deprecated) handle incoming traffic from external clients to services inside the mesh. API gateway is the recommended alternative.
+- [Terminating gateways](#terminating-gateways) enable services within the mesh to securely communicate with external services outside the mesh — such as legacy systems or third-party APIs.
+- [Mesh gateways](#mesh-gateways) enable service-to-service traffic between Consul datacenters or between Consul admin partitions. They also enable datacenters to be federated across wide area networks.
+
+![Overview diagram of Consul gateway types](/img/consul-connect/svgs/consul_gateway_overview.svg)
+
+## API gateways
+
+API gateways enable network access, from outside a service mesh, to services running in a Consul service mesh. The systems accessing the services in the mesh may be within your organizational network or external to it. This type of network traffic is commonly called _north-south_ network traffic because it refers to the flow of data into and out of a specific environment.
+
+API gateways solve the following primary use cases:
+
+- **Control access at the point of entry**: Set the protocols of external connection requests and secure inbound connections with TLS certificates from trusted providers, such as Verisign and Let's Encrypt.
+
+- **Simplify traffic management**: Load balance requests across services and route traffic to the appropriate service by matching one or more criteria, such as hostname, path, header presence or value, and HTTP method.
+
+Refer to the following documentation for information on how to configure and deploy API gateways:
+- [API Gateways on VMs](/consul/docs/north-south/api-gateway/vm/listener)
+- [API Gateways for Kubernetes](/consul/docs/north-south/api-gateway/k8s/listener)
+
+## Ingress gateways
+
+
+
+Ingress gateway is deprecated and will not be enhanced beyond its current capabilities. Ingress gateway is fully supported in this version but will be removed in a future release of Consul.
+
+Consul's API gateway is the recommended alternative to ingress gateway.
+
+
+
+Ingress gateways enable connectivity within your organizational network from services outside the Consul service mesh to services in the mesh. To accept ingress traffic from the public internet, use Consul's [API Gateway](/consul/docs/north-south/api-gateway) instead.
+
+Ingress gateways let you define what services should be exposed, on what port, and by what hostname. You configure an ingress gateway by defining a set of listeners that can map to different sets of backing services.
+
+Ingress gateways are tightly integrated with Consul's L7 configuration and enable dynamic routing of HTTP requests by attributes like the request path.
+
+For more information about ingress gateways, review the [complete documentation](/consul/docs/north-south/ingress-gateway) and the [ingress gateway tutorial](/consul/tutorials/developer-mesh/service-mesh-ingress-gateways).
+
+
+
+## Terminating gateways
+
+Terminating gateways enable services within the mesh to securely communicate with external services outside the mesh — such as legacy systems or third-party APIs.
+
+Services outside the mesh either do not have sidecar proxies or are not [integrated natively](/consul/docs/automate/native). They may be services running on legacy infrastructure or managed cloud services running on infrastructure you do not control.
+
+Terminating gateways effectively act as egress proxies that can represent one or more services. They terminate service mesh mTLS connections, enforce Consul intentions, and forward requests to the appropriate destination.
+
+These gateways also simplify authorization from dynamic service addresses. Consul's intentions determine whether connections through the gateway are authorized. Then traditional tools like firewalls or IAM roles can authorize the connections from the known gateway nodes to the destination services.
+
+For more information about terminating gateways, review the [complete documentation](/consul/docs/north-south/terminating-gateway) and the [terminating gateway tutorial](/consul/tutorials/developer-mesh/terminating-gateways-connect-external-services).
+
+
+
+## Mesh gateways
+
+Mesh gateways enable service mesh traffic to be routed between different Consul datacenters and admin partitions. The datacenters or partitions can reside in different clouds or runtime environments where general interconnectivity between all services in all datacenters is not feasible.
+
+They operate by sniffing and extracting the server name indication (SNI) header from the service mesh session and routing the connection to the appropriate destination based on the server name requested.
+
+Mesh gateways enable the following scenarios:
+
+- **Federate multiple datacenters across a WAN.** Since Consul 1.8.0, mesh gateways can forward gossip and RPC traffic between Consul servers. See [WAN federation via mesh gateways](/consul/docs/east-west/mesh-gateway/enable) for additional information.
+- **Service-to-service communication across WAN-federated datacenters.** Refer to [Enabling Service-to-service Traffic Across Datacenters](/consul/docs/east-west/mesh-gateway/federation) for additional information.
+- **Service-to-service communication across admin partitions.** Since Consul 1.11.0, you can create administrative boundaries for single Consul deployments called "admin partitions". You can use mesh gateways to facilitate cross-partition communication. Refer to [Enabling Service-to-service Traffic Across Admin Partitions](/consul/docs/east-west/mesh-gateway/admin-partition) for additional information.
+- **Bridge multiple datacenters using Cluster Peering.** Since Consul 1.14.0, mesh gateways can be used to route peering control-plane traffic between peered Consul Servers. See [Mesh Gateways for Peering Control Plane Traffic](/consul/docs/east-west/mesh-gateway/cluster-peer) for more information.
+- **Service-to-service communication across peered datacenters.** Refer to [Establish cluster peering connections](/consul/docs/east-west/cluster-peering/establish/vm) for more information.
+
+-> **Mesh gateway tutorial**: Follow the [mesh gateway tutorial](/consul/tutorials/developer-mesh/service-mesh-gateways) to learn concepts associated with mesh gateways.
\ No newline at end of file
diff --git a/website/content/docs/architecture/data-plane/index.mdx b/website/content/docs/architecture/data-plane/index.mdx
new file mode 100644
index 000000000000..6abb30c3c807
--- /dev/null
+++ b/website/content/docs/architecture/data-plane/index.mdx
@@ -0,0 +1,39 @@
+---
+layout: docs
+page_title: Consul data plane architecture
+description: >-
+ Consul provides features that help you manage your application's data plane. Learn about Consul's data plane, including its architectural components.
+---
+
+# Consul data plane architecture
+
+This topic describes Consul's architecture and operations in an application's data plane. Consul can deploy gateways and sidecar proxies to help you secure, observe, and manage application traffic.
+
+For information about the lightweight workload agents Consul uses for container-based applications on Kubernetes and AWS ECS, refer to [Consul dataplanes](/consul/docs/architecture/control-plane/dataplane).
+
+## Introduction
+
+Consul provides control plane features that help you manage your application's data plane, but the Consul process does not run directly in the data plane.
+
+When using Consul for service discovery, no additional components or configurations are required for the data plane.
+
+When using Consul's service mesh features, you can use Consul to create sidecar proxies and gateways to manage, secure, and observe service-to-service traffic.
+
+## Sidecar proxies
+
+Consul uses proxies to secure, manage, and observe all service-to-service communication. The primary mechanism is sidecar proxies, which are deployed alongside each service instance to handle all incoming and outgoing traffic. Consul includes native support for Envoy proxies, but can be configured to work with other proxy implementations.
+
+## Gateways
+
+Gateways are specialized proxies that manage specific types of traffic into, out of, or across your service mesh. There are four kinds of gateways:
+
+1. **API gateways** handle and secure incoming requests from external clients, routing them to services within the mesh. They offer advanced Layer 7 features like authentication and routing.
+2. **Ingress gateways** (deprecated) handle incoming traffic from external clients to services inside the mesh. API gateway is the recommended alternative.
+3. **Terminating gateways** enable services within the mesh to securely communicate with external services outside the mesh — such as legacy systems or third-party APIs.
+4. **Mesh gateways** enable service-to-service traffic between Consul datacenters or between Consul admin partitions. They also enable datacenters to be federated across wide area networks.
+
+For more information about each type of gateway, refer to [gateways](/consul/docs/architecture/data-plane/gateway).
+
+## Next steps
+
+Refer to [Consul's security architecture](/consul/docs/architecture/security) to learn about the encryption systems and verification protocols Consul uses to secure data plane operations.
\ No newline at end of file
diff --git a/website/content/docs/architecture/data-plane/service.mdx b/website/content/docs/architecture/data-plane/service.mdx
new file mode 100644
index 000000000000..fcde59174308
--- /dev/null
+++ b/website/content/docs/architecture/data-plane/service.mdx
@@ -0,0 +1,48 @@
+---
+layout: docs
+page_title: Services overview
+description: >-
+ Learn about services and service discovery workflows and concepts for virtual machine environments.
+---
+
+# Services
+
+This topic provides overview information about services and how to make them discoverable in Consul when your network operates on virtual machines. If service mesh is enabled in your network, refer to the following articles for additional information about connecting services in a mesh:
+
+- [How Service Mesh Works](/consul/docs/architecture/data-plane/connect)
+- [How Consul Service Mesh Works on Kubernetes](/consul/docs/k8s/connect)
+
+## Introduction
+
+A _service_ is an entity in your network that performs a specialized operation or set of related operations. In many contexts, a service is software that you want to make available to users or other programs with access to your network. Services can also refer to native Consul functionality, such as _service mesh proxies_ and _gateways_, which enable you to establish connections between different parts of your network.
+
+You can define and register services with Consul, which makes them discoverable to other services in the network. You can also configure health checks for these services to enable automated failover and load balancing. For example, health checks can help load balancers automatically remove unhealthy service instances, or trigger the promotion of a new database primary when the current one fails.
+
+## Workflow
+
+For service discovery, the core Consul workflow for services consists of three stages:
+
+1. **Define services and health checks:** A service definition lets you define various aspects of the service, including how it is discovered by other services in the network. You can define health checks in the service definitions to verify the health of the service. Refer to [Define Services](/consul/docs/register/service/vm/define) and [Define Health Checks](/consul/docs/register/health-check/vm) for additional information.
+
+1. **Register services and health checks:** After defining your services and health checks, you must register them with a Consul agent. Refer to [Register Services and Health Checks](/consul/docs/register/service/vm) for additional information.
+
+1. **Query for services:** After registering your services and health checks, other services in your network can use the DNS to perform static or dynamic lookups to access your service. Refer to [DNS Usage Overview](/consul/docs/discover/dns) for additional information about the different ways to discover services in your datacenters.
+
+## Service mesh use cases
+
+Consul routes service traffic through sidecar proxies if you use Consul service mesh. As a result, you must specify upstream configurations in service definitions. The service mesh experience is different for virtual machine (VM) and Kubernetes environments.
+
+### Virtual machines
+
+You must define upstream services in the service definition. Consul uses the upstream configuration to bind the service with its upstreams. After registering the service, you must start a sidecar proxy on the VM to enable mesh connectivity. Refer to [Deploy sidecar services](/consul/docs/connect/proxy/sidecar) for additional information.
+
+### Kubernetes
+
+If you use Consul on Kubernetes, enable the service mesh injector in your Consul Helm chart to have Consul automatically add a sidecar to each of your pods using the Kubernetes `Service` definition as a reference. You can specify upstream annotations in the `Deployment` definition to bind upstream services to the pods.
+Refer to [`connectInject`](/consul/docs/k8s/connect#installation-and-configuration) and [the upstreams annotation documentation](/consul/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) for additional information.
+
+### Multiple services
+
+You can define common characteristics for services in your mesh, such as the admin partition, namespace, or upstreams, by creating and applying a `service-defaults` configuration entry. You can also define override configurations for specific upstreams or service instances. To use `service-defaults` configuration entries, you must enable Consul service mesh in your network.
+
+Refer to [Define Service Defaults](/consul/docs/services/usage/define-services#define-service-defaults) for additional information.
diff --git a/website/content/docs/architecture/gossip.mdx b/website/content/docs/architecture/gossip.mdx
deleted file mode 100644
index 12a4ef8de7ac..000000000000
--- a/website/content/docs/architecture/gossip.mdx
+++ /dev/null
@@ -1,56 +0,0 @@
----
-layout: docs
-page_title: Gossip Protocol | Serf
-description: >-
- Consul agents manage membership in datacenters and WAN federations using the Serf protocol. Learn about the differences between LAN and WAN gossip pools and how `serfHealth` affects health checks.
----
-
-# Gossip Protocol
-
-Consul uses a [gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol)
-to manage membership and broadcast messages to the cluster. The protocol, membership management, and message broadcasting is provided
-through the [Serf library](https://github.com/hashicorp/serf/). The gossip protocol
-used by Serf is based on a modified version of the
-[SWIM (Scalable Weakly-consistent Infection-style Process Group Membership)](https://www.cs.cornell.edu/projects/Quicksilver/public_pdfs/SWIM.pdf) protocol.
-Refer to the [Serf documentation](https://github.com/hashicorp/serf/blob/master/docs/internals/gossip.html.markdown) for additional information about the gossip protocol.
-
-## Gossip in Consul
-
-Consul uses a LAN gossip pool and a WAN gossip pool to perform different functions. The pools
-are able to perform their functions by leveraging an embedded [Serf](https://github.com/hashicorp/serf/)
-library. The library is abstracted and masked by Consul to simplify the user experience,
-but developers may find it useful to understand how the library is leveraged.
-
-### LAN Gossip Pool
-
-Each datacenter that Consul operates in has a LAN gossip pool containing all members
-of the datacenter (clients _and_ servers). Membership information provided by the
-LAN pool allows clients to automatically discover servers, reducing the amount of
-configuration needed. Failure detection is also distributed and shared by the entire cluster,
-instead of concentrated on a few servers. Lastly, the gossip pool allows for fast and
-reliable event broadcasts.
-
-### WAN Gossip Pool
-
-The WAN pool is globally unique. All servers should participate in the WAN pool,
-regardless of datacenter. Membership information provided by the WAN pool allows
-servers to perform cross-datacenter requests. The integrated failure detection
-allows Consul to gracefully handle loss of connectivity--whether the loss is for
-an entire datacenter, or a single server in a remote datacenter.
-
-## Lifeguard Enhancements ((#lifeguard))
-
-SWIM assumes that the local node is healthy, meaning that soft real-time packet
-processing is possible. The assumption may be violated, however, if the local node
-experiences CPU or network exhaustion. In these cases, the `serfHealth` check status
-can flap. This can result in false monitoring alarms, additional telemetry noise, and
-CPU and network resources being wasted as they attempt to diagnose non-existent failures.
-
-Lifeguard completely resolves this issue with novel enhancements to SWIM.
-
-For more details about Lifeguard, please see the
-[Making Gossip More Robust with Lifeguard](https://www.hashicorp.com/blog/making-gossip-more-robust-with-lifeguard/)
-blog post, which provides a high level overview of the HashiCorp Research paper
-[Lifeguard : SWIM-ing with Situational Awareness](https://arxiv.org/abs/1707.00788). The
-[Serf gossip protocol guide](https://github.com/hashicorp/serf/blob/master/docs/internals/gossip.html.markdown#lifeguard-enhancements)
-also provides some lower-level details about the gossip protocol and Lifeguard.
diff --git a/website/content/docs/architecture/improving-consul-resilience.mdx b/website/content/docs/architecture/improving-consul-resilience.mdx
deleted file mode 100644
index aea40f3558e9..000000000000
--- a/website/content/docs/architecture/improving-consul-resilience.mdx
+++ /dev/null
@@ -1,177 +0,0 @@
----
-layout: docs
-page_title: Fault Tolerance in Consul
-description: >-
- Fault tolerance is a system's ability to operate without interruption despite component failure. Learn how a set of Consul servers provide fault tolerance through use of a quorum, and how to further improve control plane resilience through use of infrastructure zones and Enterprise redundancy zones.
----
-
-# Fault tolerance
-
-
-You must give careful consideration to reliability in the architecture frameworks that you build. When you build a resilient platform, it minimizes the remediation actions you need to take when a failure occurs. This document provides useful information on how to design and operate a resilient Consul cluster, including the methods and functionalities for this goal.
-
-Consul has many features that operate both locally and remotely that can help you offer a resilient service across multiple datacenters.
-
-
-## Introduction
-
-Fault tolerance is the ability of a system to continue operating without interruption
-despite the failure of one or more components. In Consul, the number of server agents determines the fault tolerance.
-
-
-Each Consul datacenter depends on a set of Consul voting server agents.
-The voting servers ensure Consul has a consistent, fault-tolerant state
-by requiring a majority of voting servers, known as a quorum, to agree upon any state changes.
-Examples of state changes include: adding or removing services,
-adding or removing nodes, and changes in service or node health status.
-
-Without a quorum, Consul experiences an outage:
-it cannot provide most of its capabilities because they rely on
-the availability of this state information.
-If Consul has an outage, normal operation can be restored by following the
-[Disaster recovery for Consul clusters guide](/consul/tutorials/datacenter-operations/recovery-outage).
-
-If Consul is deployed with 3 servers, the quorum size is 2. The deployment can lose 1
-server and still maintain quorum, so it has a fault tolerance of 1.
-If Consul is instead deployed with 5 servers, the quorum size increases to 3, so
-the fault tolerance increases to 2.
-To learn more about the relationship between the
-number of servers, quorum, and fault tolerance, refer to the
-[consensus protocol documentation](/consul/docs/architecture/consensus#deployment_table).
-
-Effectively mitigating your risk is more nuanced than just increasing the fault tolerance
-because the infrastructure costs can outweigh the improved resiliency. You must also consider correlated risks at the infrastructure-level. There are occasions when multiple servers fail at the same time. That means that a single failure could cause a Consul outage, even if your server-level fault tolerance is 2.
-
-Different options for your resilient datacenter present trade-offs between operational complexity, computing cost, and Consul request performance. Consider these factors when designing your resilient architecture.
-
-## Fault tolerance
-
-The following sections explore several options for increasing Consul's fault tolerance. For enhanced reliability, we recommend taking a holistic approach by layering these multiple functionalities together.
-
-- Spread servers across infrastructure [availability zones](#availability-zones).
-- Use a [minimum quorum size](#quorum-size) to avoid performance impacts.
-- Use [redundancy zones](#redundancy-zones) to improve fault tolerance.
-- Use [Autopilot](#autopilot) to automatically prune failed servers and maintain quorum size.
-- Use [cluster peering](#cluster-peering) to provide service redundancy.
-
-### Availability zones
-
-
-The cloud or on-premise infrastructure underlying your [Consul datacenter](/consul/docs/install/glossary#datacenter) can run across multiple availability zones.
-
-An availability zone is meant to share no points of failure with other zones by:
-- Having power, cooling, and networking systems independent from other zones
-- Being physically distant enough from other zones so that large-scale disruptions
- such as natural disasters (flooding, earthquakes) are very unlikely to affect multiple zones
-
-Availability zones are available in the regions of most cloud providers and in some on-premise installations.
-If possible, spread your Consul voting servers across 3 availability zones
-to protect your Consul datacenter from a single zone-level failure.
-For example, if deploying 5 Consul servers across 3 availability zones, place no more than 2 servers in each zone.
-If one zone fails, at most 2 servers are lost and quorum will be maintained by the 3 remaining servers.
-
-To distribute your Consul servers across availability zones, modify your infrastructure configuration with your infrastructure provider. No change is needed to your Consul server's agent configuration.
-
-Additionally, you should leverage resources that can automatically restore your compute instance,
-such as autoscaling groups, virtual machine scale sets, or compute engine autoscaler.
-Customize autoscaling resources to re-deploy servers into specific availability zones and ensure the desired numbers of servers are available at all times.
-
-### Quorum size
-
-For most production use cases, we recommend using a minimum quorum of either 3 or 5 voting servers,
-yielding a server-level fault tolerance of 1 or 2 respectively.
-
-Even though it would improve fault tolerance,
-adding voting servers beyond 5 is **not recommended** because it decreases Consul's performance—
-it requires Consul to involve more servers in every state change or consistent read.
-
-Consul Enterprise users can use redundancy zones to improve fault tolerance without this performance penalty.
-
-### Redundancy zones
-
-Use Consul Enterprise [redundancy zones](/consul/docs/enterprise/redundancy) to improve fault tolerance without the performance penalty of increasing the number of voting servers.
-
-
-
-
-Each redundancy zone should be assigned 2 or more Consul servers.
-If all servers are healthy, only one server per redundancy zone will be an active voter;
-all other servers will be backup voters.
-If a zone's voter is lost, it will be replaced by:
-- A backup voter within the same zone, if any. Otherwise,
-- A backup voter within another zone, if any.
-
-Consul can replace lost voters with backup voters within 30 seconds in most cases.
-Because this replacement process is not instantaneous,
-redundancy zones do not improve immediate fault tolerance—
-the number of healthy voting servers that can fail at once without causing an outage.
-Instead, redundancy zones improve optimistic fault tolerance:
-the number of healthy active and back-up voting servers that can fail gradually without causing an outage.
-
-The relationship between these two types of fault tolerance is:
-
-_Optimistic fault tolerance = immediate fault tolerance + the number of healthy backup voters_
-
-For example, consider a Consul datacenter with 3 redundancy zones and 2 servers per zone.
-There will be 3 voting servers (1 per zone), meaning a quorum size of 2 and an immediate fault tolerance of 1.
-There will also be 3 backup voters (1 per zone), each of which increase the optimistic fault tolerance.
-Therefore, the optimistic fault tolerance is 4.
-This provides performance similar to a 3 server setup with fault tolerance similar to a 7 server setup.
-
-We recommend associating each Consul redundancy zone with an infrastructure availability zone
-to also gain the infrastructure-level fault tolerance benefits provided by availability zones.
-However, Consul redundancy zones can be used even without the backing of infrastructure availability zones.
-
-For more information on redundancy zones, refer to:
-- [Redundancy zone documentation](/consul/docs/enterprise/redundancy)
- for a more detailed explanation
-- [Redundancy zone tutorial](/consul/tutorials/enterprise/redundancy-zones)
- to learn how to use them
-
-### Autopilot
-
-Autopilot is a set of functions that introduce servers to a cluster, cleans up dead servers, and monitors the state of the Raft protocol in the Consul cluster.
-
-When you enable Autopilot's dead server cleanup, Autopilot marks failed servers as `Left` and removes them from the Raft peer set to prevent them from interfering with the quorum size. Autopilot does that as soon as a replacement Consul server comes online. This behavior is beneficial when server nodes failed and have been redeployed but Consul considers them as new nodes because their IP address and hostnames have changed. Autopilot keeps the cluster peer set size correct and the quorum requirement simple.
-
-To illustrate the Autopilot advantage, consider a scenario where Consul has a cluster of five server nodes. The quorum is three, which means the cluster can lose two server nodes before the cluster fails. The following events happen:
-
-1. Two server nodes fail.
-1. Two replacement nodes are deployed with new hostnames and IPs.
-1. The two replacement nodes rejoin the Consul cluster.
-1. Consul treats the replacement nodes as extra nodes, unrelated to the previously failed nodes.
-
-_With Autopilot not enabled_, the following happens:
-
-1. Consul does not immediately clean up the failed nodes when the replacement nodes join the cluster.
-1. The cluster now has the three surviving nodes, the two failed nodes, and the two replacement nodes, for a total of seven nodes.
- - The quorum is increased to four, which means the cluster can only afford to lose one node until after the two failed nodes are deleted in seventy-two hours.
- - The redundancy level has decreased from its initial state.
-
-_With Autopilot enabled_, the following happens:
-
-1. Consul immediately cleans up the failed nodes when the replacement nodes join the cluster.
-1. The cluster now has the three surviving nodes and the two replacement nodes, for a total of five nodes.
- - The quorum stays at three, which means the cluster can afford to lose two nodes before it fails.
- - The redundancy level remains the same.
-
-### Cluster peering
-
-Linking multiple Consul clusters together to provide service redundancy is the most effective method to prevent disruption from failure. This method is enhanced when you design individual Consul clusters with resilience in mind. Consul clusters interconnect in two ways: WAN federation and cluster peering. We recommend using cluster peering whenever possible.
-
-Cluster peering lets you connect two or more independent Consul clusters using mesh gateways, so that services can communicate between non-identical partitions in different datacenters.
-
-
-
-
-Cluster peering is the preferred way to interconnect clusters because it is operationally easier to configure and manage than WAN federation. Cluster peering communication between two datacenters runs only on one port on the related Consul mesh gateway, which makes it operationally easy to expose for routing purposes.
-
-When you use cluster peering to connect admin partitions between datacenters, use Consul’s dynamic traffic management functionalities `service-splitter`, `service-router` and `service-failover` to configure your service mesh to automatically forward or failover service traffic between peer clusters. Consul can then manage the traffic intended for the service and do [failover](/consul/docs/connect/config-entries/service-resolver#spec-failover), [load-balancing](/consul/docs/connect/config-entries/service-resolver#spec-loadbalancer), or [redirection](/consul/docs/connect/config-entries/service-resolver#spec-redirect).
-
-Cluster peering also extends service discovery across different datacenters independent of service mesh functions. After you peer datacenters, you can refer to services between datacenters with `.virtual.peer.consul` in Consul DNS. For Consul Enterprise, your query string may need to include the namespace, partition, or both. Refer to the [Consul DNS documentation](/consul/docs/services/discovery/dns-static-lookups#service-virtual-ip-lookups) for details on building virtual service lookups.
-
-For more information on cluster peering, refer to:
-- [Cluster peering documentation](/consul/docs/connect/cluster-peering)
- for a more detailed explanation
-- [Cluster peering tutorial](/consul/tutorials/implement-multi-tenancy/cluster-peering)
- to learn how to implement cluster peering
diff --git a/website/content/docs/architecture/index.mdx b/website/content/docs/architecture/index.mdx
deleted file mode 100644
index dc3f7954bdd0..000000000000
--- a/website/content/docs/architecture/index.mdx
+++ /dev/null
@@ -1,114 +0,0 @@
----
-layout: docs
-page_title: Consul Architecture
-description: >-
- Consul datacenters consist of clusters of server agents (control plane) and client agents deployed alongside service instances (data plane). Learn how these components and their different communication methods make Consul possible.
----
-
-# Consul Architecture
-
-This topic provides an overview of the Consul architecture. We recommend reviewing the Consul [glossary](/consul/docs/install/glossary) as a companion to this topic to help you become familiar with HashiCorp terms.
-
-> Refer to the [Reference Architecture tutorial](/consul/tutorials/production-deploy/reference-architecture) for hands-on guidance about deploying Consul in production.
-
-## Introduction
-
-Consul provides a control plane that enables you to register, access, and secure services deployed across your network. The _control plane_ is the part of the network infrastructure that maintains a central registry to track services and their respective IP addresses.
-
-When using Consul’s service mesh capabilities, Consul dynamically configures sidecar and gateway proxies in the request path, which enables you to authorize service-to-service connections, route requests to healthy service instances, and enforce mTLS encryption without modifying your service’s code. This ensures that communication remains performant and reliable. Refer to [Service Mesh Proxy Overview](/consul/docs/connect/proxies) for an overview of sidecar proxies.
-
-
-
-## Datacenters
-
-The Consul control plane contains one or more _datacenters_. A datacenter is the smallest unit of Consul infrastructure that can perform basic Consul operations. A datacenter contains at least one [Consul server agent](#server-agents), but a real-world deployment contains three or five server agents and several [Consul client agents](#client-agents). You can create multiple datacenters and allow nodes in different datacenters to interact with each other. Refer to [Bootstrap a Datacenter](/consul/docs/install/bootstrapping) for information about how to create a datacenter.
-
-### Clusters
-
-A collection of Consul agents that are aware of each other is called a _cluster_. The terms _datacenter_ and _cluster_ are often used interchangeably. In some cases, however, _cluster_ refers only to Consul server agents, such as in [HCP Consul Dedicated](https://cloud.hashicorp.com/products/consul). In other contexts, such as the [_admin partitions_](/consul/docs/enterprise/admin-partitions) feature included with Consul Enterprise, a cluster may refer to a collection of client agents.
-
-## Agents
-
-You can run the Consul binary to start Consul _agents_, which are daemons that implement Consul control plane functionality. You can start agents as servers or clients. Refer to [Consul Agent](/consul/docs/agent) for additional information.
-
-### Server agents
-
-Consul server agents store all state information, including service and node IP addresses, health checks, and configuration. We recommend deploying three or five servers in a cluster. The more servers you deploy, the greater the resilience and availability in the event of a failure. More servers, however, slow down [consensus](#consensus-protocol), which is a critical server function that enables Consul to efficiently and effectively process information.
-
-#### Consensus protocol
-
-Consul clusters elect a single server to be the _leader_ through a process called _consensus_. The leader processes all queries and transactions, which prevents conflicting updates in clusters containing multiple servers.
-
-Servers that are not currently acting as the cluster leader are called _followers_. Followers forward requests from client agents to the cluster leader. The leader replicates the requests to all other servers in the cluster. Replication ensures that if the leader is unavailable, other servers in the cluster can elect another leader without losing any data.
-
-Consul servers establish consensus using the Raft algorithm on port `8300`. Refer to [Consensus Protocol](/consul/docs/architecture/consensus) for additional information.
-
-
-
-### Client agents
-
-Consul clients report node and service health status to the Consul cluster. In a typical deployment, you must run client agents on every compute node in your datacenter. Clients use remote procedure calls (RPC) to interact with servers. By default, clients send RPC requests to the servers on port `8300`.
-
-There are no limits to the number of client agents or services you can use with Consul, but production deployments should distribute services across multiple Consul datacenters. Using a multi-datacenter deployment enhances infrastructure resilience and limits control plane issues. We recommend deploying a maximum of 5,000 client agents per datacenter. Some large organizations have deployed tens of thousands of client agents and hundreds of thousands of service instances across a multi-datacenter deployment. Refer to [Cross-datacenter requests](#cross-datacenter-requests) for additional information.
-
-You can also run Consul with an alternate service mesh configuration that deploys Envoy proxies but not client agents. Refer to [Simplified Service Mesh with Consul Dataplanes](/consul/docs/connect/dataplane) for more information.
-
-## LAN gossip pool
-
-Client and server agents participate in a LAN gossip pool so that they can distribute and perform node [health checks](/consul/docs/services/usage/checks). Agents in the pool propagate the health check information across the cluster. Agent gossip communication occurs on port `8301` using UDP. Agent gossip falls back to TCP if UDP is not available. Refer to [Gossip Protocol](/consul/docs/architecture/gossip) for additional information.
-
-The following simplified diagram shows the interactions between servers and clients.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-## Cross-datacenter requests
-
-Each Consul datacenter maintains its own catalog of services and their health. By default, the information is not replicated across datacenters. WAN federation and cluster peering are two multi-datacenter deployment models that enable service connectivity across datacenters.
-
-### WAN federation
-
-WAN federation is an approach for connecting multiple Consul datacenters. It requires you to designate a _primary datacenter_ that contains authoritative information about all datacenters, including service mesh configurations and access control list (ACL) resources.
-
-In this model, when a client agent requests a resource in a remote secondary datacenter, a local Consul server forwards the RPC request to a remote Consul server that has access to the resource. A remote server sends the results to the local server. If the remote datacenter is unavailable, its resources are also unavailable. By default, WAN-federated servers send cross-datacenter requests over TCP on port `8300`.
-
-You can configure control plane and data plane traffic to go through mesh gateways, which simplifies networking requirements.
-
-> **Hands-on**: To enable services to communicate across datacenters when the ACL system is enabled, refer to the [ACL Replication for Multiple Datacenters](/consul/tutorials/security-operations/access-control-replication-multiple-datacenters) tutorial.
-
-#### WAN gossip pool
-
-Servers may also participate in a WAN gossip pool, which is optimized for greater latency imposed by the Internet. The pool enables servers to exchange information, such as their addresses and health, and gracefully handle loss of connectivity in the event of a failure.
-
-In the following diagram, the servers in each data center participate in a WAN gossip pool by sending data over TCP/UDP on port `8302`. Refer to [Gossip Protocol](/consul/docs/architecture/gossip) for additional information.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-### Cluster peering
-
-You can create peering connections between two or more independent clusters so that services deployed to different datacenters or admin partitions can communicate. An [admin partition](/consul/docs/enterprise/admin-partitions) is a feature in Consul Enterprise that enables you to define isolated network regions that use the same Consul servers. In the cluster peering model, you create a token in one of the datacenters or partitions and configure another datacenter or partition to present the token to establish the connection.
-
-Refer to [What is Cluster Peering?](/consul/docs/connect/cluster-peering) for additional information.
diff --git a/website/content/docs/architecture/jepsen.mdx b/website/content/docs/architecture/jepsen.mdx
deleted file mode 100644
index 44a433c8d35b..000000000000
--- a/website/content/docs/architecture/jepsen.mdx
+++ /dev/null
@@ -1,118 +0,0 @@
----
-layout: docs
-page_title: Consistency Verification | Jepsen Testing Results
-description: >-
- Jepsen is a tool to measure the reliability and consistency of distributed systems across network partitions. Learn about the Jepsen testing performed on Consul to ensure it gracefully recovers from partitions and maintains consistent state.
----
-
-# Jepsen Testing Results
-
-[Jepsen](http://aphyr.com/posts/281-call-me-maybe-carly-rae-jepsen-and-the-perils-of-network-partitions)
-is a tool, written by Kyle Kingsbury, designed to test the partition
-tolerance of distributed systems. It creates network partitions while fuzzing
-the system with random operations. The results are analyzed to see if the system
-violates any of the consistency properties it claims to have.
-
-As part of our Consul testing, we ran a Jepsen test to determine if
-any consistency issues could be uncovered. In our testing, Consul
-gracefully recovered from partitions without introducing any consistency
-issues.
-
-## Running the tests
-
-At the moment, testing with Jepsen is rather complex as it requires
-setting up multiple virtual machines, SSH keys, DNS configuration,
-and a working Clojure environment. We hope to contribute our Consul
-testing code upstream and to provide a Vagrant environment for Jepsen
-testing soon.
-
-## Output
-
-Below is the output captured from Jepsen. We ran Jepsen multiple times,
-and it passed each time. This output is only representative of a single
-run and has been edited for length. Please reach out on [Consul's Discuss](https://discuss.hashicorp.com/c/consul)
-if you would like to reproduce the Jepsen results.
-
-
-
-```shell-session
-$ lein test :only jepsen.system.consul-test
-
-lein test jepsen.system.consul-test
-INFO jepsen.os.debian - :n5 setting up debian
-INFO jepsen.os.debian - :n3 setting up debian
-INFO jepsen.os.debian - :n4 setting up debian
-INFO jepsen.os.debian - :n1 setting up debian
-INFO jepsen.os.debian - :n2 setting up debian
-INFO jepsen.os.debian - :n4 debian set up
-INFO jepsen.os.debian - :n5 debian set up
-INFO jepsen.os.debian - :n3 debian set up
-INFO jepsen.os.debian - :n1 debian set up
-INFO jepsen.os.debian - :n2 debian set up
-INFO jepsen.system.consul - :n1 consul nuked
-INFO jepsen.system.consul - :n4 consul nuked
-INFO jepsen.system.consul - :n5 consul nuked
-INFO jepsen.system.consul - :n3 consul nuked
-INFO jepsen.system.consul - :n2 consul nuked
-INFO jepsen.system.consul - Running nodes: {:n1 false, :n2 false, :n3 false, :n4 false, :n5 false}
-INFO jepsen.system.consul - :n2 consul nuked
-INFO jepsen.system.consul - :n3 consul nuked
-INFO jepsen.system.consul - :n4 consul nuked
-INFO jepsen.system.consul - :n5 consul nuked
-INFO jepsen.system.consul - :n1 consul nuked
-INFO jepsen.system.consul - :n1 starting consul
-INFO jepsen.system.consul - :n2 starting consul
-INFO jepsen.system.consul - :n4 starting consul
-INFO jepsen.system.consul - :n5 starting consul
-INFO jepsen.system.consul - :n3 starting consul
-INFO jepsen.system.consul - :n3 consul ready
-INFO jepsen.system.consul - :n2 consul ready
-INFO jepsen.system.consul - Running nodes: {:n1 true, :n2 true, :n3 true, :n4 true, :n5 true}
-INFO jepsen.system.consul - :n5 consul ready
-INFO jepsen.system.consul - :n1 consul ready
-INFO jepsen.system.consul - :n4 consul ready
-INFO jepsen.core - Worker 0 starting
-INFO jepsen.core - Worker 2 starting
-INFO jepsen.core - Worker 1 starting
-INFO jepsen.core - Worker 3 starting
-INFO jepsen.core - Worker 4 starting
-INFO jepsen.util - 2 :invoke :read nil
-INFO jepsen.util - 3 :invoke :cas [4 4]
-INFO jepsen.util - 0 :invoke :write 4
-INFO jepsen.util - 1 :invoke :write 1
-INFO jepsen.util - 4 :invoke :cas [4 0]
-INFO jepsen.util - 2 :ok :read nil
-INFO jepsen.util - 4 :fail :cas [4 0]
-(Log Truncated...)
-INFO jepsen.util - 4 :invoke :cas [3 3]
-INFO jepsen.util - 4 :fail :cas [3 3]
-INFO jepsen.util - :nemesis :info :stop nil
-INFO jepsen.util - :nemesis :info :stop "fully connected"
-INFO jepsen.util - 0 :fail :read nil
-INFO jepsen.util - 1 :fail :write 0
-INFO jepsen.util - :nemesis :info :stop nil
-INFO jepsen.util - :nemesis :info :stop "fully connected"
-INFO jepsen.core - nemesis done
-INFO jepsen.core - Worker 3 done
-INFO jepsen.util - 1 :invoke :read nil
-INFO jepsen.core - Worker 2 done
-INFO jepsen.core - Worker 4 done
-INFO jepsen.core - Worker 0 done
-INFO jepsen.util - 1 :ok :read 3
-INFO jepsen.core - Worker 1 done
-INFO jepsen.core - Run complete, writing
-INFO jepsen.core - Analyzing
-(Log Truncated...)
-INFO jepsen.core - Analysis complete
-INFO jepsen.system.consul - :n3 consul nuked
-INFO jepsen.system.consul - :n2 consul nuked
-INFO jepsen.system.consul - :n4 consul nuked
-INFO jepsen.system.consul - :n1 consul nuked
-INFO jepsen.system.consul - :n5 consul nuked
-1964 element history linearizable. :D
-
-Ran 1 tests containing 1 assertions.
-0 failures, 0 errors.
-```
-
-
diff --git a/website/content/docs/architecture/scale.mdx b/website/content/docs/architecture/scale.mdx
deleted file mode 100644
index 119e05454abf..000000000000
--- a/website/content/docs/architecture/scale.mdx
+++ /dev/null
@@ -1,283 +0,0 @@
----
-layout: docs
-page_title: Recommendations for operating Consul at scale
-description: >-
- When using Consul for large scale deployments, you can ensure network resilience by tailoring your network to your needs. Learn more about HashiCorp's recommendations for deploying Consul at scale.
----
-
-# Operating Consul at Scale
-
-This page describes how Consul's architecture impacts its performance with large scale deployments and shares recommendations for operating Consul in production at scale.
-
-## Overview
-
-Consul is a distributed service networking system deployed as a centralized set of servers that coordinate network activity using sidecars that are located alongside user workloads. When Consul is used for its service mesh capabilities, servers also generate configurations for Envoy proxies that run alongside service instances. These proxies support service mesh capabilities like end-to-end mTLS and progressive deployments.
-
-Consul can be deployed in either a single datacenter or across multiple datacenters by establishing WAN federation or peering connections. In this context, a datacenter refers to a named environment whose hosts can communicate with low networking latency. Typically, users map a Consul datacenter to a cloud provider region such as AWS `us-east-1` or Azure `East US`.
-
-To ensure consistency and high availability, Consul servers share data using the [Raft consensus protocol](/consul/docs/architecture/consensus). When persisting data, Consul uses BoltDB to store Raft logs and a custom file format for state snapshots. For more information, refer to [Consul architecture](/consul/docs/architecture).
-
-## General deployment recommendations
-
-This section provides general configuration and monitoring recommendations for operating Consul at scale.
-
-### Data plane resiliency
-
-To make service-to-service communication resilient against outages and failures, we recommend spreading multiple service instances for a service across fault domains. Resilient deployments spread services across multiple of each of the following:
-
-- Infrastructure-level availability zones
-- Runtime platform instances, such as Kubernetes clusters
-- Consul datacenters
-
-In the event that any individual domain experiences a failure, service failover ensures that healthy instances in other domains remain discoverable. Consul automatically provides service failover between instances within a single [admin partition](/consul/docs/enterprise/admin-partitions) or datacenter.
-
-Service failover across Consul datacenters must be configured in the datacenters before you can use it. Use one of the following methods to configure failover across datacenters:
-
-- **If you are using Consul service mesh**: Implement failover using [service-resolver configuration entries](/consul/docs/connect/config-entries/service-resolver#failover).
-- **If you are using Consul service discovery without service mesh**: Implement [geo-redundant failover using prepared queries](/consul/tutorials/developer-discovery/automate-geo-failover).
-
-### Control plane resiliency
-
-When a large number of services are deployed to a single datacenter, the Consul servers may experience slower network performance. To make the control plane more resilient against slowdowns and outages, limit the size of individual datacenters by spreading deployments across availability zones, runtimes, and datacenters.
-
-#### Datacenter size
-
-To ensure resiliency, we recommend limiting deployments to a maximum of 5,000 Consul client agents per Consul datacenter. There are two reasons for this recommendation:
-
-1. **Blast radius reduction**: When Consul suffers a server outage in a datacenter or region, _blast radius_ refers to the number of Consul clients or dataplanes attached to that datacenter that can no longer communicate as a result. We recommend limiting the total number of clients attached to a single Consul datacenter in order to reduce the size of its blast radius. Even though Consul is able to run clusters with 10,000 or more nodes, it takes longer to bring larger deployments back online after an outage, which impacts time to recovery.
-1. **Agent gossip management**: Consul agents use the [gossip protocol](/consul/docs/architecture/gossip) to share membership information in a gossip pool. By default, all client agents in a single Consul datacenter are in a single gossip pool. Whenever an agent joins or leaves the gossip pool, the other agents propagate that event throughout the pool. If a Consul datacenter experiences _agent churn_, or a consistently high rate of agents joining and leaving a single pool, cluster performance may be affected by gossip messages being generated faster than they can be transmitted. The result is an ever-growing message queue.
-
-To mitigate these risks, we recommend a maximum of 5,000 Consul client agents in a single gossip pool. There are several strategies for making gossip pools smaller:
-
-1. Run exactly one Consul agent per host in the infrastructure.
-1. Break up the single Consul datacenter into multiple smaller datacenters.
-1. Enterprise users can define [network segments](/consul/docs/enterprise/network-segments/network-segments-overview) to divide the single gossip pool in the Consul datacenter into multiple smaller pools.
-
-If appropriate for your use case, we recommend breaking up a single Consul datacenter into multiple smaller datacenters. Running multiple datacenters reduces your network’s blast radius more than applying network segments.
-
-Be aware that the number 5,000 is a heuristic for deployments. The number of agents you deploy per datacenter is limited by performance, not Consul itself. Because gossip stability risk is determined by _the rate of agent churn_ rather than _the number of nodes_, a gossip pool with mostly static nodes may be able to operate effectively with more than 5,000 agents. Meanwhile, a gossip pool with highly dynamic agents, such as spot fleet instances and serverless functions where 10% of agents are replaced each day, may need to be smaller than 5,000 agents.
-
-For additional information about the specific tests we conducted on Consul deployments at scale in order to generate these recommendations, refer to [Consul Scale Test Report to Observe Gossip Stability](https://www.hashicorp.com/blog/consul-scale-test-report-to-observe-gossip-stability) on the HashiCorp blog.
-
-For most use cases, a limit of 5,000 agents is appropriate. When the `consul.serf.queue.Intent` metric is consistently high, it is an indication that the gossip pool cannot keep up with the sustained level of churn. In this situation, reduce the churn by lowering the number of agents per datacenter.
-
-#### Kubernetes-specific guidance
-
-In Kubernetes, even though it is possible to deploy Consul agents inside pods alongside services running in the same pod, this unsupported deployment pattern has known performance issues at scale. At large volumes, pod registration and deregistration in Kubernetes causes gossip instability that can lead to cascading failures as services are marked unhealthy, resulting in further cluster churn.
-
-In Consul v1.14 and higher, Consul on Kubernetes does not need to run client agents on every node in a cluster for service discovery and service mesh. This deployment configuration lowers Consul’s resource usage in the data plane, but requires additional resources in the control plane to process [xDS resources](/consul/docs/agent/config/config-files#xds-server-parameters). To learn more, refer to [simplified service mesh with Consul Dataplane](/consul/docs/connect/dataplane).
-
-**If you use Kubernetes and Consul as a backend for Vault**: Use Vault’s integrated storage backend instead of Consul. A runtime dependency conflict prevents Consul dataplanes from being compatible with Vault. If you need to use Consul v1.14 and higher as a backend for Vault in your Kubernetes deployment, create a separate Consul datacenter that is not federated or peered to your other Consul servers. You can size this datacenter according to your needs and use it exclusively for backend storage for Vault.
-
-## Consul server deployment recommendations
-
-Consul server agents are an important part of Consul’s architecture. This section summarizes the differences between running managed and self-managed servers, as well as recommendations on the number of servers to run, how to deploy servers across redundancy zones, hardware requirements, and cloud provider integrations.
-
-### Consul server runtimes
-
-Consul servers can be deployed on a few different runtimes:
-
-- **HashiCorp Cloud Platform (HCP) Consul Dedicated**. These Consul servers are deployed in a hosted environment managed by HCP. To get started with HCP Consul Dedicated servers in Kubernetes or VM deployments, refer to the [Deploy HCP Consul Dedicated tutorial](/consul/tutorials/get-started-hcp/hcp-gs-deploy).
-- **VMs or bare metal servers (Self-managed)**. To get started with Consul on VMs or bare metal servers, refer to the [Deploy Consul server tutorial](/consul/tutorials/get-started-vms/virtual-machine-gs-deploy). For a full list of configuration options, refer to [Agents Overview](/consul/docs/agent).
-- **Kubernetes (Self-managed)**. To get started with Consul on Kubernetes, refer to the [Deploy Consul on Kubernetes tutorial](/consul/tutorials/get-started-kubernetes/kubernetes-gs-deploy).
-- **Other container environments, including Docker, Rancher, and Mesos (Self-managed)**.
-
-When operating Consul at scale, self-managed VM or bare metal server deployments offer the most flexibility. Some Consul Enterprise features that can enhance fault tolerance and read scalability, such as [redundancy zones](/consul/docs/enterprise/redundancy) and [read replicas](/consul/docs/enterprise/read-scale), are not available to server agents on Kubernetes runtimes. To learn more, refer to [Consul Enterprise feature availability by runtime](/consul/docs/enterprise#feature-availability-by-runtime).
-
-### Number of Consul servers
-
-Determining the number of Consul servers to deploy on your network has two key considerations:
-
-1. **Fault tolerance**: The number of server outages your deployment can tolerate while maintaining quorum. Additional servers increase a network’s fault tolerance.
-1. **Performance scalability**: Although additional servers can help handle more read requests, each additional server also adds replication overhead and slows the quorum process. Having too many servers impedes your network instead of helping it.
-
-Fault tolerance should determine your initial decision for how many Consul server agents to deploy. Our recommendation for the number of servers to deploy depends on whether you have access to Consul Enterprise redundancy zones:
-
-- **With redundancy zones**: Deploy 6 Consul servers across 3 availability zones. This deployment provides the performance of a 3 server deployment with the fault tolerance of a 7 server deployment.
-- **Without redundancy zones**: Deploy 5 Consul servers across 3 availability zones. All 5 servers should be voting servers, not [read replicas](/consul/docs/enterprise/read-scale).
-
-For more details, refer to [Improving Consul Resilience](/consul/docs/architecture/improving-consul-resilience).
-
-### Server requirements
-
-To ensure your server nodes are a sufficient size, we recommend reviewing [hardware sizing for Consul servers](/consul/tutorials/production-deploy/reference-architecture#hardware-sizing-for-consul-servers). If your network needs to handle heavy workloads, refer to our recommendations in [read-heavy workload sources and solutions](#read-heavy-workload-sources-and-solutions) and [write-heavy workload sources and solutions](#write-heavy-workload-sources-and-solutions).
-
-#### File descriptors
-
-Consul's agents use network sockets for gossip communication with the other nodes and agents. As a result, servers create file descriptors for connections from clients, connections from other servers, watch handlers, health checks, and log files. For write-heavy clusters, you must increase the limit on the number of file descriptors from the default value, 1024. We recommend using a number that is two times higher than your expected number of clients in the cluster.
-
-#### Auto scaling groups
-
-Auto scaling groups (ASGs) are infrastructure associations in cloud providers used to ensure a specific number of replicas are available for a deployment. When using ASGs for Consul servers, there are specific requirements and processes for bootstrapping Raft and maintaining quorum.
-
-We recommend using the [`bootstrap-expect` command-line flag](/consul/docs/agent/config/cli-flags#_bootstrap_expect) during cluster creation. However, if you spawn new servers to add to a cluster or upgrade servers, do not configure them to automatically bootstrap. If `bootstrap-expect` is set on these replicas, it is possible for them to create a separate Raft system, which causes a _split brain_ and leads to errors and general cluster instability.
-
-#### NUMA architecture awareness
-
-Some cloud providers offer extremely large instance sizes with Non-Uniform Memory Access (NUMA) architectures. Because the Go runtime is not NUMA aware, Consul is not NUMA aware. Even though you can run Consul on NUMA architecture, it will not take advantage of the multiprocessing capabilities.
-
-### Consistency modes
-
-Consul offers different [consistency modes](/consul/api-docs/features/consistency#stale) for both its DNS and HTTP APIs.
-
-#### DNS
-
-We strongly recommend using [stale consistency mode for DNS lookups](/consul/api-docs/features/consistency#consul-dns-queries) to optimize for performance over consistency when operating at scale. It is enabled by default and configured with `dns_config.allow_stale`.
-
-We also recommend that you do not configure [`dns_config.max_stale` to limit the staleness of DNS responses](/consul/api-docs/features/consistency#limiting-staleness-advanced-usage), as it may result in a prolonged outage if your Consul servers become overloaded. If bounded result consistency is required by a service, consider modifying the service to use consistent service discovery HTTP API queries instead of DNS lookups.
-
-Avoid using [`dns_config.use_cache`](/consul/docs/agent/config/config-files#dns_use_cache) when operating Consul at scale. Because the Consul agent cache allocates memory for each requested route and each allocation can live up to 3 days, severe memory issues may occur. To implement DNS caching, we instead recommend that you [configure TTLs for services and nodes](/consul/docs/services/discovery/dns-cache#ttl) to enable the DNS client to cache responses from Consul.
-
-#### HTTP API
-
-By default, all HTTP API read requests use the [`default` consistency mode](/consul/api-docs/features/consistency#default-1) unless overridden on a per-request basis. We do not recommend changing the default consistency mode for HTTP API requests.
-
-We also recommend that you do not configure [`http_config.discovery_max_stale`](/consul/api-docs/features/consistency#changing-the-default-consistency-mode-advanced-usage) to limit the staleness of HTTP responses.
-
-## Resource usage and metrics recommendations
-
-While operating Consul, monitor the CPU load on the Consul server agents and use metrics from agent telemetry to figure out the cause. Procedures for mitigating heavy resource usage depend on whether the load is caused by read operations, write operations, or Consul’s consensus protocol.
-
-### Read-heavy workload sources and solutions
-
-The highest CPU load usually belongs to the current leader. If the CPU load is high, request load is likely a major contributor. Check the following [server health metrics](/consul/docs/agent/telemetry#server-health):
-
-- `consul.rpc.*` - Traditional RPC metrics. The most relevant metrics for understanding server CPU load in read-heavy workloads are `consul.rpc.query` and `consul.rpc.queries_blocking`.
-- `consul.grpc.server.*` - Metrics for the number of streams being processed by the server.
-- `consul.xds.server.*` - Metrics for the Envoy xDS resources being processed by the server. In Consul v1.14 and higher, these metrics have the potential to become a significant source of read load. Refer to [Consul dataplanes](/consul/docs/connect/dataplane) for more information.
-
-Depending on your needs, choose one of the following strategies to mitigate server CPU load:
-
-- The fastest mitigation strategy is to vertically scale servers. However, this strategy increases compute costs and does not scale indefinitely.
-- The most effective long term mitigation strategy is to use [stale consistency mode](/consul/api-docs/features/consistency#stale) for as many read requests as possible. In Consul v1.12 and higher, operators can use the [`consul.rpc.server.call` metric](/consul/docs/agent/telemetry#server-workload) to identify the most frequent type of read requests made to the Consul servers. Cross reference the results with each endpoint’s [HTTP API documentation](/consul/api-docs) and use stale consistency for endpoints that support it.
-- If most read requests already use stale consistency mode and you still need to reduce your request load, add more non-voting servers to your deployment. You can use either [redundancy zones](/consul/docs/enterprise/redundancy) or [read replicas](/consul/docs/enterprise/read-scale) to scale reads without impacting write latency. We recommend adding more servers to redundancy zones because they improve both fault tolerance and stale read scalability.
-- In Consul v1.14 and higher, servers handle Envoy XDS streams for [Consul Dataplane deployments](/consul/docs/connect/dataplane) in stale consistency mode. As a result, server consistency mode is not configurable. Use the `consul.xds.server.*` metrics to identify issues related to XDS streams.
-
-### Write-heavy workload sources and solutions
-
-Consul is write-limited by disk I/O. For write-heavy workloads, we recommend using NVMe disks.
-
-As a starting point, you should make sure your hardware meets the requirements for [large size server clusters](/consul/tutorials/production-deploy/reference-architecture#hardware-sizing-for-consul-servers), which has 7500+ IOps and 250+ MB/s disk throughput. IOps should be around 5 to 10 times the expected write rate. Conduct further analysis around disk sizing and your expected write rates to understand your network’s specific needs.
-
-If you use network storage, such as AWS EBS, we recommend provisioned I/O volumes. While general purpose volumes function properly, their burstable IOps make it harder to capacity plan. A small peak in writes may not trigger alerts, but as usage grows you may reach a point where the burst limit runs out and workload performance worsens.
-
-For more information, refer to the [server performance read/write tuning](/consul/docs/install/performance#read-write-tuning).
-
-### Raft database performance sources and solutions
-
-Consul servers use the [Raft consensus protocol](/consul/docs/architecture/consensus) to maintain a consistent and fault-tolerant state. Raft stores most Consul data in a MemDB database, which is an in-memory database with indexing. In order to tolerate restarts and power outages, Consul writes Raft logs to disk using BoltDB. Refer to [Agent telemetry](/consul/docs/agent/telemetry) for more information on metrics for detecting write health.
-
-To monitor overall transaction performance, check for spikes in the [Transaction timing metrics](/consul/docs/agent/telemetry#transaction-timing). You can also use the [Raft replication capacity issues metrics](/consul/docs/agent/telemetry#raft-replication-capacity-issues) to monitor Raft log snapshots and restores, as spikes and longer durations can be symptoms of overall write and disk contention issues.
-
-In Consul v1.11 and higher, you can also monitor Raft performance with the [`consul.raft.boltdb.*` metrics](/consul/docs/agent/telemetry#bolt-db-performance). We recommend monitoring `consul.raft.boltdb.storeLogs` for increased activity above normal operating patterns.
-
-Refer to [Consul agent telemetry](/consul/docs/agent/telemetry#bolt-db-performance) for more information on agent metrics and how to use them.
-
-#### Raft database size
-
-Raft writes logs to BoltDB, which is designed as a single grow-only file. As a result, if you add 1GB of log entries and then you take a snapshot, only a small number of recent log entries may appear in the file. However, the actual file on disk never shrinks smaller than the 1GB size it grew.
-
-If you need to reclaim disk space, use the `bbolt` CLI to copy the data to a new database and repoint to the new database in the process. However, be aware that the `bbolt compact` command requires the database to be offline while being pointed to the new database.
-
-In many cases, including in large clusters, disk space is not a primary concern because Raft logs rarely grow larger than a small number of GiB. However, an inflated file with lots of free space significantly degrades write performance overall due to _freelist management_.
-
-After they are written to disk, Raft logs are eventually captured in a snapshot and log nodes are removed from BoltDB. BoltDB keeps track of the pages for the removed nodes in its freelist. BoltDB also writes this freelist to disk every time there is a Raft write. When the Raft log grows large quickly and then gets truncated, the size of the freelist can become very large. In the worst case reported to us, the freelist was over 10MB. When this large freelist is written to disk on every Raft commit, the result is a large write amplification for what should be a small Raft commit.
-
-To figure out if a Consul server’s disk performance issues are the result of BoldDB’s freelist, try the following strategies:
-
-- Compare network bandwidth inbound to the server against disk write bandwidth. If _disk write bandwidth_ is greater than or equal to 5 times the _inbound network bandwidth_, the disks are likely experiencing freelist management performance issues. While BoltDB freelist may cause problems at ratios lower than 5 to 1, high write bandwidth to inbound bandwidth ratios are a reliable indicator that BoltDB freelist is causing a problem.
-- Use the [`consul.raft.leader.dispatchLog` metric](/consul/docs/agent/telemetry#server-health) to get information about how long it takes to write a batch of logs to disk.
-- In Consul v1.13 and higher, you can use [Raft thread saturation metrics](/consul/docs/agent/telemetry#raft-thread-saturation) to figure out if Raft is experiencing back pressure and is unable to accept new work due disk limitations.
-
-In Consul v1.11 and higher, you can prevent BoltDB from writing the freelist to disk by setting [`raftboltdb.NoFreelistSync`](/consul/docs/agent/config/config-files#NoFreelistSync) to `true`. This setting causes BoltDB to retain the freelist in memory instead. However, be aware that when BoltDB restarts, it needs to scan the database file to manually create the freelist. Small delays in startup may occur. On a fast disk, we measured these delays at the order of tens of seconds for a raft.db file that was 5GiB in size with only 250MiB of used pages.
-
-In general, set [`raftboltdb.NoFreelistSync`](/consul/docs/agent/config/config-files#NoFreelistSync) to `true` to produce the following effects:
-
-- Reduce the amount of data written to disk
-- Increase the amount of time it takes to load the raft.db file on startup
-
-We recommend operators optimize networks according to their individual concerns. For example, if your server runs into disk performance issues but Consul servers do not restart often, setting [`raftboltdb.NoFreelistSync`](/consul/docs/agent/config/config-files#NoFreelistSync) to `true` may solve your problems. However, the same action causes issues for deployments with large database files and frequent server restarts.
-
-#### Raft snapshots
-
-Each state change produces a Raft log entry, and each Consul server receives the same sequence of log entries, which results in servers sharing the same state. The sequence of Raft logs is periodically compacted by the leader into a _snapshot_ of state history. These snapshots are internal to Raft and are not the same as the snapshots generated through Consul's API, although they contain the same data. Raft snapshots are stored in the server's data directory in the `raft/` folder, alongside the logs in `raft.db`.
-
-When you add a new Consul server, it must catch up to the current state. It receives the latest snapshot from the leader followed by the sequence of logs between that snapshot and the leader’s current state. Each Raft log has a sequence number and each snapshot contains the last sequence number included in the snapshot. A combination of write-heavy workloads, a large state, congested networks, or busy servers makes it possible for new servers to struggle to catch up to the current state before the next log they need from the leader has already been truncated. The result is a _snapshot install loop_.
-
-For example, if snapshot A on the leader has an index of 99 and the current index is 150, then when a new server comes online the leader streams snapshot A to the new server for it to restore. However, this snapshot only enables the new server to catch up to index 99. Not only does the new server still need to catch up to index 150, but the leader continued to commit Raft logs in the meantime.
-
-When the leader takes snapshot B at index 199, it truncates the logs that accumulated between snapshot A and snapshot B, which means it truncates Raft logs with indexes between 100 and 199.
-
-Because the new server restored snapshot A, the new server has a current index of 99. It requests logs 100 to 150 because index 150 was the current index when it started the replication restore process. At this point, the leader recognizes that it only has logs 200 and higher, and does not have logs for indexes 100 to 150. The leader determines that the new server’s state is stale and starts the process over by sending the new server the latest snapshot, snapshot B.
-
-Consul keeps a configurable number of [Raft trailing logs](/consul/docs/agent/config/config-files#raft_trailing_logs) to prevent the snapshot install loop from repeating. The trailing logs are the last logs that went into the snapshot, and the new server can more easily catch up to the current state using these logs. The default Raft trailing logs configuration value is suitable for most deployments.
-
-In Consul v1.10 and higher, operators can try to prevent a snapshot install loop by monitoring and comparing Consul servers’ `consul.raft.rpc.installSnapshot` and `consul.raft.leader.oldestLogAge` timing metrics. Monitor these metrics for the following situations:
-
-- After truncation, the lowest number on `consul.raft.leader.oldestLogAge` should always be at least two times higher than the lowest number for `consul.raft.rpc.installSnapshot`.
-- If these metrics are too close, increase the number of Raft trailing logs, which increases `consul.raft.leader.oldestLogAge`. Do not set the Raft trailing logs higher than necessary, as it can negatively affect write throughput and latency.
-
-For more information, refer to [Raft Replication Capacity Issues](/consul/docs/agent/telemetry#raft-replication-capacity-issues).
-
-## Performance considerations for specific use cases
-
-This section provides configuration and monitoring recommendations for Consul deployments according to the features you prioritize and their use cases.
-
-### Service discovery
-
-To optimize performance for service discovery, we recommend deploying multiple small clusters with consistent numbers of service instances and watches.
-
-Several factors influence Consul performance at scale when used primarily for its service discovery and health check features. The factors you have control over include:
-
-- The overall number of registered service instances
-- The use of [stale reads](/consul/api-docs/features/consistency#consul-dns-queries) for DNS queries
-- The number of entities, such as Consul client agents or dataplane components, that are monitoring Consul for changes in a service's instances, including registration and health status. When any service change occurs, all of those entities incur a computational cost because they must process the state change and reconcile it with previously known data for the service. In addition, the Consul server agents also incur a computational cost when sending these updates.
-- Number of [watches](/consul/docs/dynamic-app-config/watches) monitoring for changes to a service.
-- Rate of catalog updates, which is affected by the following events:
- - A service instance’s health check status changes
- - A service instance’s node loses connectivity to Consul servers
- - The contents of the [service definition file](/consul/docs/services/configuration/services-configuration-reference) changes
- - Service instances are registered or deregistered
- - Orchestrators such as Kubernetes or Nomad move a service to a new node
-
-These factors can occur in combination with one another. Overall, the amount of work the servers complete for service discovery is the product of these factors:
-
-- Data size, which changes as the number of services and service instances increases
-- The catalog update rate
-- The number of active watches
-
-Because it is typical for these factors to increase in number as clusters grow, the CPU and network resources the servers require to distribute updates may eventually exceed linear growth.
-
-In situations where you can’t run a Consul client agent alongside the service instance you want to register with Consul, such as instances hosted externally or on legacy infrastructure, we recommend using [Consul ESM](https://github.com/hashicorp/consul-esm).
-
-Consul ESM enables health checks and monitoring for external services. When using Consul ESM, we recommend running multiple instances to ensure redundancy.
-
-### Service mesh
-
-Because Consul’s service mesh uses service discovery subsystems, service mesh performance is also optimized by deploying multiple small clusters with consistent numbers of service instances and watches. Service mesh performance is influenced by the following additional factors:
-
-- The [transparent proxy](/consul/docs/connect/transparent-proxy) feature causes client agents to listen for service instance updates across all services instead of a subset. To prevent performance issues, we recommend that you do not use the permissive intention, `default: allow`, with the transparent proxy feature. When combined, every service instance update propagates to every proxy, which causes additional server load.
-- When you use the [built-in service mesh CA provider](/consul/docs/connect/ca/consul#built-in-ca), Consul leaders are responsible for signing certificates used for mTLS across the service mesh. The impact on CPU utilization depends on the total number of service instances and configured certificate TTLs. You can use the [CA provider configuration options](/consul/docs/agent/config/config-files#common-ca-config-options) to control the number of requests a server processes. We recommend adjusting [`csr_max_concurrent`](/consul/docs/agent/config/config-files#ca_csr_max_concurrent) and [`csr_max_per_second`](/consul/docs/agent/config/config-files#ca_csr_max_concurrent) to suit your environment.
-
-### K/V store
-
-While the K/V store in Consul has some similarities to object stores we recommend that you do not use it as a primary application data store.
-
-When using Consul's K/V store for application configuration and metadata, we recommend the following to optimize performance:
-
-- Values must be below 512 KB and transactions should be below 64 operations.
-- The keyspace must be well bound. While 10,000 keys may not affect performance, millions of keys are more likely to cause performance issues.
-- Total data size must fit in memory, with additional room for indexes. We recommend that the in-memory size is 3 times the raw key value size.
-- Total data size should remain below 1 GB. Larger snapshots are possible on suitably fast hardware, but they significantly increase recovery times and the operational complexity needed for replication. We recommend limiting data size to keep the cluster healthy and able to recover during maintenance and outages.
-- The K/V store is optimized for reading. To know when you need to make changes to server resources and capacity, we recommend carefully monitoring update rates after they exceed more than a hundred updates per second across the cluster.
-- We recommend that you do not use the K/V store as a general purpose database or object store.
-
-In addition, we recommend that you do not use the [blocking query mechanism](/consul/api-docs/features/blocking) to listen for updates when your K/V store’s update rate is high. When a K/V result is updated too fast, blocking query loops degrade into busy loops. These loops consume excessive client CPU and cause high server load until appropriately throttled. Watching large key prefixes is unlikely to solve the issue because returning the entire key prefix every time it updates can quickly consume a lot of bandwidth.
-
-### Backend for Vault
-
-At scale, using Consul as a backend for Vault results in increased memory and CPU utilization on Consul servers. It also produces unbounded growth in Consul’s data persistence layer that is proportional to both the amount of data being stored in Vault and the rate the data is updated.
-
-In situations where Consul handles large amounts of data and has high write throughput, we recommend adding monitoring for the [capacity and health of raft replication on servers](/consul/docs/agent/telemetry#raft-replication-capacity-issues). If the server experiences heavy load when the size of its stored data is large enough, a follower may be unable to catch up on replication and become a voter after restarting. This situation occurs when the time it takes for a server to restore from disk takes longer than it takes for the leader to write a new snapshot and truncate its logs. Refer to [Raft snapshots](#raft-snapshots) for more information.
-
-Vault v1.4 and higher provides [integrated storage](/vault/docs/concepts/integrated-storage) as its recommended storage option. If you currently use Consul as a storage backend for Vault, we recommend switching to integrated storage. For a comparison between Vault's integrated storage and Consul as a backend for Vault, refer to [storage backends in the Vault documentation](/vault/docs/configuration/storage#integrated-storage-vs-consul-as-vault-storage). For detailed guidance on migrating the Vault backend from Consul to Vault's integrated storage, refer to the [storage migration tutorial](/vault/docs/configuration/storage#integrated-storage-vs-consul-as-vault-storage). Integrated storage improves resiliency by preventing a Consul outage from also affecting Vault functionality.
diff --git a/website/content/docs/architecture/security.mdx b/website/content/docs/architecture/security.mdx
new file mode 100644
index 000000000000..32b588b3d459
--- /dev/null
+++ b/website/content/docs/architecture/security.mdx
@@ -0,0 +1,22 @@
+---
+layout: docs
+page_title: Consul security architecture
+description: >-
+ Consul includes built-in security features and options to integrate existing security features to secure communication between users, agents in the control plane, and services in the data plane.
+---
+
+# Consul security architecture
+
+This page introduces the parts of Consul's architecture that secure communication between users, Consul agents in the control plane, and communication between services in your application's data plane.
+
+## Control plane
+
+@include 'tables/compare/architecture/security/control-plane.mdx'
+
+## Data plane
+
+@include 'tables/compare/architecture/security/data-plane.mdx'
+
+## Best practices
+
+@include 'text/best-practice/architecture/security.mdx'
\ No newline at end of file
diff --git a/website/content/docs/dynamic-app-config/sessions/application-leader-election.mdx b/website/content/docs/automate/application-leader-election.mdx
similarity index 94%
rename from website/content/docs/dynamic-app-config/sessions/application-leader-election.mdx
rename to website/content/docs/automate/application-leader-election.mdx
index 5b14bcdc9e10..796706f429cd 100644
--- a/website/content/docs/dynamic-app-config/sessions/application-leader-election.mdx
+++ b/website/content/docs/automate/application-leader-election.mdx
@@ -7,16 +7,17 @@ description: >-
# Application leader election
-This topic describes the process for building client-side leader elections for service instances using Consul's [session mechanism for building distributed locks](/consul/docs/dynamic-app-config/sessions) and the [Consul key/value store](/consul/docs/dynamic-app-config/kv), which is Consul's key/value datastore.
+This topic describes the process for building client-side leader elections for service instances using Consul's [session mechanism for building distributed locks](/consul/docs/automate/session) and the [Consul key/value store](/consul/docs/automate/kv), which is Consul's key/value datastore.
-This topic is not related to Consul's leader election. For more information about the Raft leader election used internally by Consul, refer to
-[consensus protocol](/consul/docs/architecture/consensus) documentation.
+This topic is not related to Consul's leader election. For more information
+about the Raft leader election used internally by Consul, refer to the
+[consensus protocol](/consul/docs/concept/consensus) documentation.
## Background
Some distributed applications, like HDFS or ActiveMQ, require setting up one instance as a leader to ensure application data is current and stable.
-Consul's support for [sessions](/consul/docs/dynamic-app-config/sessions) and [watches](/consul/docs/dynamic-app-config/watches) allows you to build a client-side leader election process where clients use a lock on a key in the KV datastore to ensure mutual exclusion and to gracefully handle failures.
+Consul's support for [sessions](/consul/docs/automate/session) and [watches](/consul/docs/automate/watch) allows you to build a client-side leader election process where clients use a lock on a key in the KV datastore to ensure mutual exclusion and to gracefully handle failures.
All service instances that are participating should coordinate on a key format. We recommend the following pattern:
@@ -32,7 +33,7 @@ service//leader
- `session:write` permissions over the service session name
- `key:write` permissions over the key
- The `curl` command
-
+
Expose the token using the `CONSUL_HTTP_TOKEN` environment variable.
## Client-side leader election procedure
@@ -54,7 +55,7 @@ The workflow for building a client-side leader election process has the followin
## Create a new session
-Create a configuration for the session.
+Create a configuration for the session.
The minimum viable configuration requires that you specify the session name. The following example demonstrates this configuration.
@@ -203,7 +204,7 @@ Error! Did not acquire lock
This example used the node's `hostname` as the key data. This data can be used by the other services to create configuration files.
-Be aware that this locking system has no enforcement mechanism that requires clients to acquire a lock before they perform an operation. Any client can read, write, and delete a key without owning the corresponding lock.
+Be aware that this locking system has no enforcement mechanism that requires clients to acquire a lock before they perform an operation. Any client can read, write, and delete a key without owning the corresponding lock.
## Watch the KV key for locks
@@ -393,4 +394,3 @@ Success! Lock released on: service/leader
After a lock is released, the key data do not show a value for `Session` in the results.
Other clients can use this as a way to coordinate their lock requests.
-
diff --git a/website/content/docs/automate/consul-template/configure.mdx b/website/content/docs/automate/consul-template/configure.mdx
new file mode 100644
index 000000000000..21982dca1b08
--- /dev/null
+++ b/website/content/docs/automate/consul-template/configure.mdx
@@ -0,0 +1,54 @@
+---
+layout: docs
+page_title: Configure Consul Template
+description: >-
+ Consul Template is a tool available as a distinct binary that enables dynamic application configuration and secrets rotation for Consul deployments based on Go templates.
+---
+
+# Configure Consul Template
+
+You can configure Consul Template with a configuration file written in the [HashiCorp Configuration Language](https://github.com/hashicorp/hcl). The configuration is also JSON compatible.
+For a full list of configuration options, refer to the [Consul Template configuration reference](/consul/docs/reference/consul-template/configuration).
+
+To start using Consul Template, run the Consul Template CLI with the `-config` flag pointing at the configuration file:
+
+```shell-session
+$ consul-template -config "/my/config.hcl"
+```
+
+Specify this argument multiple times to load multiple configuration files. The right-most configuration takes the highest precedence. If you provide the path to a directory instead of a file, all of the files in the given directory are merged in [lexical order](http://golang.org/pkg/path/filepath/#Walk), recursively. Be aware that symbolic links are not followed.
+
+The full list of available commands and options is available in the [Consul Template CLI reference](/consul/docs/reference/consul-template/cli).
+
+~> **Note** Commands specified on the CLI take precedence over a config file.
+
+Not all available fields for the configuration file are required. For example, if you are not retrieving secrets from Vault, you do not need to specify a Vault configuration section. Similarly, if you are not logging to syslog, you do not need to specify a syslog configuration.
+
+For additional security, tokens may also be read from the environment using the `CONSUL_TOKEN` or `VAULT_TOKEN` environment variables. We recommend that you do not include plain-text tokens in a configuration file.
+
+## Example
+
+The following example configures Consul Template for a Consul agent and renders a template with a value from [the Consul KV store](/consul/docs/automate/kv). It writes the output to a file on disk.
+
+```hcl
+consul {
+ address = "127.0.0.1:8500"
+
+ auth {
+ enabled = true
+ username = "test"
+ password = "test"
+ }
+}
+
+log_level = "warn"
+
+template {
+ contents = "{{key \"hello\"}}"
+ destination = "out.txt"
+ exec {
+ command = "cat out.txt"
+ }
+}
+```
+
diff --git a/website/content/docs/automate/consul-template/index.mdx b/website/content/docs/automate/consul-template/index.mdx
new file mode 100644
index 000000000000..519b8035b7c1
--- /dev/null
+++ b/website/content/docs/automate/consul-template/index.mdx
@@ -0,0 +1,173 @@
+---
+layout: docs
+page_title: Consul Template
+description: >-
+ Consul Template is a tool available as a distinct binary that enables dynamic application configuration and secrets rotation for Consul deployments based on Go templates.
+---
+
+# Consul Template
+
+This topic provides an overview of the Consul Template tool, which enables a programmatic method for rendering configuration files from a variety of locations, including the Consul KV store. It is an effective workflow option for replacing complicated API queries that often require custom formatting.
+
+For more information about the KV store and using it to automatically configure application deployments, refer to [Consul key/value (KV) store overview](/consul/docs/automate/kv).
+
+## Introduction
+
+The Consul Template tool is not part of the Consul binary. It has a [dedicated GitHub repo](https://github.com/hashicorp/consul-template) and you must [install Consul Template](/consul/docs/automate/consul-template/install) before running it on the command line.
+
+Consul templates are based on Go templates and share many of the same attributes. When initiated, Consul Template reads one or more template files and queries Consul for data to render the full configuration.
+
+In a typical scenario, you run Consul Template as a daemon that fetches the initial values and then continues to watch for updates. The template re-renders whenever there are relevant changes in the datacenter. The template can also run arbitrary commands after the update process completes. For example, it can send the HUP signal to the load balancer service after a configuration change has been made.
+
+The Consul Template tool is flexible, and it can fit into many different environments and workflows. Depending on the use case, you may have a single Consul Template instance on a handful of hosts, or you may need to run several instances on every host. Each Consul Template process can manage multiple unrelated files and removes duplicated information as needed when files share data dependencies.
+
+Use Consul Template in the following situations:
+
+1. **Update configuration files**. The Consul Template tool can be used to update service configuration files. A common use case is managing load balancer configuration files that need to be updated regularly in a dynamic infrastructure.
+
+1. **Update configuration secrets**. You can configure Consul Template to use Vault secret engines to generate secrets for your application's configuration, including encryption keys, TLS certificates, and passwords. Consul Template renders these secrets into files or environment variables for secure consumption by your application.
+
+1. **Discover data about the Consul datacenter and service**. It is possible to collect information about the services in your Consul datacenter. For example, you could
+ collect a list of all services running on the datacenter or you could discover all service addresses for the Redis service. Be aware that this use case has limited scope for production.
+
+## Workflow
+
+A typical workflow to add Consul Template to your datacenter consists of the following steps:
+
+1. [Install Consul Template](/consul/docs/automate/consul-template/install) on nodes in your datacenter.
+1. [Configure Consul Template](/consul/docs/automate/consul-template/configure) on nodes on your datacenter.
+1. Create a template for the data you need to retrieve. There are different options in order of complexity:
+ 1. Use an inline template for simple requests using the [`contents`](/consul/docs/reference/consul-template/configuration#contents) parameter.
+ 1. Create a template file using the [available functions](/consul/docs/reference/consul-template/go).
+ 1. [Define a custom plugin](/consul/docs/automate/consul-template/plugins) to execute custom data manipulation.
+1. [Render the template](/consul/docs/automate/consul-template/render) and apply it to your datacenter.
+1. [View Consul Template logs](/consul/docs/automate/consul-template/log) to monitor execution and debug issues.
+
+## Use case: Consul KV
+
+In this example, you render a template that pulls the HashiCorp address from Consul KV. To do this, you create a template that contains the HashiCorp address, run the `consul-template` command, add a value to Consul KV for HashiCorp's address, and finally view the rendered file.
+
+
+
+First, create a template file `find_address.tpl` to query Consul's KV store.
+
+
+
+```go
+{{ key "/hashicorp/street_address" }}
+```
+
+
+
+Next, run `consul-template` specifying both the template to use and the file to update.
+
+```shell-session
+$ consul-template -template "find_address.tpl:hashicorp_address.txt"
+```
+
+The `consul-template` process will continue to run until you kill it with `CTRL+C`. For now, leave it running.
+
+Finally, open a new terminal so you can write data to the KV store in Consul using the command line interface.
+
+```shell-session
+$ consul kv put hashicorp/street_address "101 2nd St"
+
+Success! Data written to: hashicorp/street_address
+```
+
+Verify the data was written by viewing the `hashicorp_address.txt` file. This file is located in the same directory where Consul Template is running.
+
+```shell-session
+$ cat hashicorp_address.txt
+
+101 2nd St
+```
+
+If you update the key `hashicorp/street_address`, you can observe the changes to the file immediately.
+
+Change the content of the key in Consul to observe the process.
+
+```shell-session
+$ consul kv put hashicorp/street_address "22b Baker ST"
+
+Success! Data written to: hashicorp/street_address
+```
+
+Verify the new value was written by viewing the `hashicorp_address.txt` file.
+
+```shell-session
+$ cat hashicorp_address.txt
+
+22b Baker ST
+```
+
+You can now kill the `consul-template` process with `CTRL+C`.
+
+## Use case: discover all services
+
+In this example, you use Consul Template to discover all the services running in the Consul datacenter.
+
+First, create a new template `all-services.tpl` to query all services.
+
+
+
+```go
+{{ range services -}}
+# {{ .Name }}
+{{- range service .Name }}
+{{ .Address }}
+{{- end }}
+
+{{ end -}}
+```
+
+
+
+Next, run Consul Template and specify the template you just created and the `-once` flag. The `-once` flag tells the process to run once and then quit.
+
+```shell-session
+$ consul-template -template="all-services.tpl:all-services.txt" -once
+```
+
+If you complete this command with a [local development agent](/consul/docs/fundamentals/install/dev), the answer contains only the default `consul` service when viewing `all-services.txt`.
+
+```plaintext hideClipboard
+# consul
+127.0.0.1
+```
+
+On a development or production datacenter, you would get a list of all the services.
+
+For example:
+
+```plaintext hideClipboard
+# consul
+104.131.121.232
+
+# redis
+104.131.86.92
+104.131.109.224
+104.131.59.59
+
+# web
+104.131.86.92
+104.131.109.224
+104.131.59.59
+```
+
+## Production scenarios
+
+The previous examples demonstrate how to use Consul Template to render files dynamically with data in the Consul KV store or in the Consul catalog.
+
+You can extend this process to realize complex configurations.
+
+For example, you can use Consul Template to [automate an NGINX reverse proxy configuration](/consul/tutorials/network-automation/consul-template-load-balancing). This process uses the Consul catalog as a source for the upstream IPs.
+
+You can also use Consul Template to automate Consul operations using Vault as secret management. For example, you can:
+
+- [Generate and rotate gossip encryption keys for Consul](/consul/docs/automate/consul-template/vault/gossip)
+- [Generate and rotate mTLS certificates for Consul](/consul/docs/automate/consul-template/vault/mtls)
+
+For additional Consul Template examples, refer to [the examples folder in the `consul-template` GitHub repository](https://github.com/hashicorp/consul-template/tree/master/examples).
+
+To explore the available functions to use in your templates refer to the [Consul Template Go language reference](/consul/docs/reference/consul-template/go).
\ No newline at end of file
diff --git a/website/content/docs/automate/consul-template/install.mdx b/website/content/docs/automate/consul-template/install.mdx
new file mode 100644
index 000000000000..bf645ab3bf0f
--- /dev/null
+++ b/website/content/docs/automate/consul-template/install.mdx
@@ -0,0 +1,234 @@
+---
+layout: docs
+page_title: Install Consul Template
+description: >-
+ Consul Template is a tool available as a distinct binary that enables dynamic application configuration and secrets rotation for Consul deployments based on Go templates.
+---
+
+# Install Consul Template
+
+Consul Template is available as a pre-compiled binary or as a package for several operating systems. You can also build Consul Template from source.
+
+## Precompiled Binaries
+
+
+
+
+
+
+
+Install the required packages.
+
+```shell-session
+$ sudo apt-get update && \
+ sudo apt-get install wget gpg coreutils
+```
+
+Add the HashiCorp [GPG key][gpg-key].
+
+```shell-session
+$ wget -O- https://apt.releases.hashicorp.com/gpg | \
+ sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
+```
+
+Add the official HashiCorp Linux repository.
+
+```shell-session
+$ echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" \
+| sudo tee /etc/apt/sources.list.d/hashicorp.list
+```
+
+Update and install.
+
+```shell-session
+$ sudo apt-get update && sudo apt-get install consul-template
+```
+
+
+
+
+Install `yum-config-manager` to manage your repositories.
+
+```shell-session
+$ sudo yum install -y yum-utils
+```
+
+Use `yum-config-manager` to add the official HashiCorp Linux repository.
+
+```shell-session
+$ sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo
+```
+
+Install.
+
+```shell-session
+$ sudo yum -y install consul-template
+```
+
+
+
+
+Install `dnf config-manager` to manage your repositories.
+
+```shell-session
+$ sudo dnf install -y dnf-plugins-core
+```
+
+Use `dnf config-manager` to add the official HashiCorp Linux repository.
+
+```shell-session
+$ sudo dnf config-manager addrepo --from-repofile=https://rpm.releases.hashicorp.com/fedora/hashicorp.repo
+```
+
+Install.
+
+```shell-session
+$ sudo dnf -y install consul-template
+```
+
+
+
+
+Install `yum-config-manager` to manage your repositories.
+
+```shell-session
+$ sudo yum install -y yum-utils
+```
+
+Use `yum-config-manager` to add the official HashiCorp Linux repository.
+
+```shell-session
+$ sudo yum-config-manager \
+ --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo
+```
+
+Install.
+
+```shell-session
+$ sudo yum -y install consul-template
+```
+
+
+
+
+Download a [precompiled binary](https://releases.hashicorp.com/consul-template/), verify the binary using the available `SHA-256` sums, and unzip the package to a location on your machine. Make sure that the location of the `consul-template` binary is available on your `PATH` before continuing with the other guides.
+
+
+
+
+
+
+
+
+
+
+
+[Homebrew](https://brew.sh) is a free and open source package management system
+for Mac OS X. Install the official [Consul Template
+formula](https://github.com/hashicorp/homebrew-tap) from the terminal.
+
+First, install the HashiCorp tap, a repository of all of the HashiCorp Homebrew
+packages.
+
+```shell-session
+$ brew tap hashicorp/tap
+```
+
+Now, install Consul Template with `hashicorp/tap/consul-template`.
+
+```shell-session
+$ brew install hashicorp/tap/consul-template
+```
+
+-> This command installs a signed binary and is automatically updated with
+every new official release.
+
+To update to the latest, run
+
+```shell-session
+$ brew upgrade hashicorp/tap/consul-template
+```
+
+
+
+Download a [precompiled binary](https://releases.hashicorp.com/consul-template/), verify the binary using the available `SHA-256` sums, and unzip the package to a location on your machine. Make sure that the location of the `consul-template` binary is available on your `PATH` before continuing with the other guides.
+
+
+
+
+
+
+
+
+
+
+
+[Chocolatey](https://chocolatey.org/) is a free and open-source package
+management system for Windows. Install the [Consul Template
+package](https://chocolatey.org/packages/consul-template) from the command-line.
+
+```shell-session
+$ choco install consul-template
+```
+
+-> Chocolatey and the Consul Template package are **NOT** directly maintained
+by HashiCorp. The latest version of Consul Template is always available by manual
+installation.
+
+
+
+
+Download a [precompiled binary](https://releases.hashicorp.com/consul-template/), verify the binary using the available `SHA-256` sums, and unzip the package to a location on your machine. Make sure that the location of the `consul-template` binary is available on your `PATH` before continuing with the other guides.
+
+
+
+
+
+
+
+
+## Compile from the source
+
+Clone the [`hashicorp/consul-template`](https://github.com/hashicorp/consul-template) repository from GitHub to build and install the Consul Template binary in your path `$GOPATH/bin`. Building from source requires `git` and [Golang](https://go.dev/).
+
+```shell-session
+$ git clone https://github.com/hashicorp/consul-template.git
+```
+
+Enter the repository directory.
+
+```shell-session
+$ cd consul-template
+```
+
+Select the release you want to compile.
+
+```shell-session
+$ git checkout tags/<vX.Y.Z>
+```
+
+Build Consul Template for your system. The binary will be placed in `./bin`.
+
+```shell-session
+$ make dev
+```
+
+Once installed, verify the installation works by passing the `-version` or `-help` option.
+
+```shell-session
+$ consul-template -version
+```
+
+## Run Consul Template as a Docker container
+
+Install and run Consul Template as a [Docker container](https://hub.docker.com/r/hashicorp/consul-template).
+
+```shell-session
+$ docker pull hashicorp/consul-template
+```
+
+Once installed, verify the installation works by passing the `-version` or `-help` option.
+
+```shell-session
+$ docker run --rm hashicorp/consul-template -version
+```
diff --git a/website/content/docs/automate/consul-template/log.mdx b/website/content/docs/automate/consul-template/log.mdx
new file mode 100644
index 000000000000..3fd4ca1e9c0b
--- /dev/null
+++ b/website/content/docs/automate/consul-template/log.mdx
@@ -0,0 +1,83 @@
+---
+layout: docs
+page_title: Consul Template logs
+description: >-
+ Consul Template is a tool available as a distinct binary that enables dynamic application configuration and secrets rotation for Consul deployments based on Go templates.
+---
+
+# Consul Template logs
+
+This page describes the logging process for Consul Template.
+
+## Set log level
+
+To set the log level for Consul Template, use the `-log-level` flag:
+
+```shell-session
+$ consul-template -log-level info ...
+```
+
+You can also use the `CONSUL_TEMPLATE_LOG_LEVEL` environment variable to set the log level.
+
+```shell-session
+$ export CONSUL_TEMPLATE_LOG_LEVEL=info && consul-template ...
+```
+
+The command outputs the log in the standard output.
+
+
+
+```log
+# ...
+[INFO] (cli) received redis from Watcher
+[INFO] (cli) invoking Runner
+# ...
+```
+
+
+
+When debugging, you can also specify the level as debug:
+
+```shell-session
+$ consul-template -log-level debug ...
+```
+
+The command outputs the log in the standard output.
+
+
+
+```log
+# ...
+[DEBUG] (cli) creating Runner
+[DEBUG] (cli) creating Consul API client
+[DEBUG] (cli) creating Watcher
+[DEBUG] (cli) looping for data
+[DEBUG] (watcher) starting watch
+[DEBUG] (watcher) all pollers have started, waiting for finish
+[DEBUG] (redis) starting poll
+[DEBUG] (service redis) querying Consul with &{...}
+[DEBUG] (service redis) Consul returned 2 services
+[DEBUG] (redis) writing data to channel
+[DEBUG] (redis) starting poll
+[INFO] (cli) received redis from Watcher
+[INFO] (cli) invoking Runner
+[DEBUG] (service redis) querying Consul with &{...}
+# ...
+```
+
+
+
+## Log to file
+
+Consul Template can log to file as well. Logging to file is particularly useful in use cases where it is not trivial to capture *stdout* and/or *stderr*. For example, we recommend logging to file when Consul Template is deployed as a long running service.
+
+These are the relevant CLI flags:
+
+- `-log-file` - writes all the Consul Template log messages to a file. This value is used as a prefix for the log file name. The current timestamp
+ is appended to the file name. If the value ends in a path separator, `consul-template-` will be appended to the value. If the file name is missing an extension, `.log` is appended. For example, setting `log-file` to `/var/log/` would result in a log file path of `/var/log/consul-template-{timestamp}.log`. `log-file` can be combined with `-log-rotate-bytes` and `-log-rotate-duration` for a fine-grained log rotation experience.
+
+- `-log-rotate-bytes` - to specify the number of bytes that should be written to a log before it needs to be rotated. Unless specified, there is no limit to the number of bytes that can be written to a log file.
+
+- `-log-rotate-duration` - to specify the maximum duration a log should be written to before it needs to be rotated. Must be a duration value such as `30s`. Defaults to `24h`.
+
+- `-log-rotate-max-files` - to specify the maximum number of older log file archives to keep. Defaults to 0 (no files are ever deleted). Set to `-1` to discard old log files when a new one is created.
\ No newline at end of file
diff --git a/website/content/docs/automate/consul-template/mode.mdx b/website/content/docs/automate/consul-template/mode.mdx
new file mode 100644
index 000000000000..30045e651f22
--- /dev/null
+++ b/website/content/docs/automate/consul-template/mode.mdx
@@ -0,0 +1,235 @@
+---
+layout: docs
+page_title: Consul Template modes
+description: >-
+ Consul Template is a tool available as a distinct binary that enables dynamic application configuration and secrets rotation for Consul deployments based on Go templates.
+---
+
+# Consul Template modes
+
+Consul Template can run in different modes that change its runtime behavior and process lifecycle.
+
+- [Once Mode](#once-mode)
+- [De-Duplication Mode](#de-duplication-mode)
+- [Exec Mode](#exec-mode)
+
+## Once Mode
+
+When running in Once Mode, Consul Template executes each template exactly once and exits.
+
+In Once Mode, Consul Template waits for all dependencies to be rendered. If
+a template specifies a dependency that does not exist in Consul,
+once mode waits until Consul returns data for that dependency. Be aware
+that "returned data" and "empty data" are not mutually exclusive.
+
+To run in Once mode, include the `-once` flag or enable it in the [configuration file](/consul/docs/reference/consul-template/configuration).
+
+When you run the query `{{ service "foo" }}` to return all healthy services named "foo", you
+are asking Consul to return all the healthy services named "foo." If there are
+no services with that name, the response is the empty array. This response is identical to the
+response if there are no _healthy_ services named "foo."
+
+Consul Template processes input templates multiple times because the first result could impact later dependencies:
+
+```go
+{{ range services }}
+{{ range service .Name }}
+{{ end }}
+{{ end }}
+```
+
+In this example, we have to process the output of `services` before we can
+lookup each `service`, since the inner loops cannot be evaluated until the outer
+loop returns a response. Consul Template waits until it gets a response from
+Consul for all dependencies before rendering a template. It does not wait until
+that response is non-empty though.
+
+
+
+Once mode implicitly disables any wait or quiescence timers specified in configuration files or passed on the command line.
+
+
+
+## De-Duplication Mode
+
+Consul Template works by parsing templates to determine what data is needed and then watching Consul for any changes to that data. This process allows Consul Template to efficiently re-render templates when a change occurs. However, if there are many instances of Consul Template rendering a common template, you may encounter a linear duplication of work as each instance is querying the same data.
+
+To make this pattern more efficient, Consul Template supports work de-duplication across instances. You can enable this feature with the `-dedup` flag or in the top-level [`deduplicate` configuration block](/consul/docs/reference/consul-template/configuration#de-duplication-mode). Once enabled, Consul Template uses leader election on a per-template basis so that only a single node performs the queries. Results are shared among other instances rendering the same template by passing compressed data through the Consul K/V store.
+
+Be aware that no Vault data is stored in the compressed template. Because ACLs around Vault are typically more closely controlled than those ACLs around Consul's KV, Consul Template still requests the secret from Vault on each iteration.
+
+When running in de-duplication mode, it is important that local template functions resolve correctly.
+
+For example, you may have a local template function that relies on the `env` helper like this:
+
+```hcl
+{{ key (env "KEY") }}
+```
+
+It is crucial that the environment variable `KEY` in this example is consistent
+across all machines engaged in de-duplicating this template. If the values are
+different, Consul Template will be unable to resolve the template, and the template
+does not render successfully.
+
+## Exec Mode
+
+As of version `0.16.0`, Consul Template has the ability to maintain an arbitrary
+child process, similar to [envconsul](https://github.com/hashicorp/envconsul).
+
+This mode is most beneficial when running Consul Template in a container or on a
+scheduler like Nomad or Kubernetes. When
+activated, Consul Template will spawn and manage the lifecycle of the child
+process.
+
+Configuration options for running Consul Template in exec mode can be found in
+the [configuration documentation](/consul/docs/reference/consul-template/configuration#exec-mode).
+
+This mode is best explained through an example. Consider a simple application that
+reads a configuration file from disk and spawns a server from that configuration.
+
+```shell-session
+$ consul-template \
+ -template "/tmp/config.ctmpl:/tmp/server.conf" \
+ -exec "/bin/my-server -config /tmp/server.conf"
+```
+
+When Consul Template starts, it will pull the required dependencies and populate
+the `/tmp/server.conf`, which the `my-server` binary consumes. After that
+template is rendered completely the first time, Consul Template spawns and
+manages a child process. When any of the listed templates changes, Consul Template
+will send a configurable reload signal to the child process. Additionally,
+Consul Template will proxy any signals it receives to the child process. This
+enables a scheduler to control the lifecycle of the process and also eases the
+friction of running inside a container.
+
+The same rules that apply to the commands apply here,
+that is if you want to use a complex, shell-like command you need to be running
+on a system with `sh` on your PATH. These commands are run using `sh -c` with
+the shell handling all shell parsing. Otherwise, the command should be a
+single command or a list-formatted command with arguments.
+
+
+
+On supporting systems (*nix, with `sh`) the
+[`setpgid`](https://man7.org/linux/man-pages/man2/setpgid.2.html) flag is set
+on the execution which ensures all signals are sent to all processes.
+
+
+
+There are some additional caveats with Exec Mode, which should be considered
+carefully before use:
+
+- If the child process dies, the Consul Template process will also die. Consul
+  Template **does not supervise the process**. Supervision is generally the
+ responsibility of the scheduler or init system.
+
+- The child process must remain in the foreground. This is a requirement for
+ Consul Template to manage the process and send signals.
+
+- The exec command will only start after _all_ templates have been rendered at
+ least once. One may have multiple templates for a single Consul Template
+ process, all of which must be rendered before the process starts. Consider
+ something like an nginx or apache configuration where both the process
+ configuration file and individual site configuration must be written in order
+ for the service to successfully start.
+
+- After the child process is started, any change to any dependent template
+ causes the reload signal to be sent to the child process. If no reload signal
+ is provided, Consul Template will kill the process and spawn a new instance.
+ The reload signal can be specified and customized using the CLI or configuration
+ file.
+
+- When Consul Template is stopped gracefully, it will send the configurable kill
+ signal to the child process. The default value is SIGTERM, but it can be
+ customized in the CLI or configuration file.
+
+- Consul Template will forward all signals it receives to the child process
+ **except** its defined `reload_signal` and `kill_signal`. If you disable these
+ signals, Consul Template will forward them to the child process.
+
+- It is not possible to have more than one exec command, although each template
+ can still have its own reload command.
+
+- Individual template reload commands still fire independently of the exec
+ command.
+
+## Commands
+
+You can render templates with commands to execute on the host.
+
+### Environment
+
+The current process environment is used when executing commands with the following additional environment variables:
+
+- `CONSUL_HTTP_ADDR`
+- `CONSUL_HTTP_TOKEN`
+- `CONSUL_HTTP_TOKEN_FILE`
+- `CONSUL_HTTP_AUTH`
+- `CONSUL_HTTP_SSL`
+- `CONSUL_HTTP_SSL_VERIFY`
+- `NOMAD_ADDR`
+- `NOMAD_NAMESPACE`
+- `NOMAD_TOKEN`
+
+These environment variables are exported with their current values when the command executes. Other Consul tooling reads these environment variables, providing smooth integration with other Consul tools like `consul maint` or `consul lock`. Additionally, exposing these environment variables gives power users the ability to further customize their command script.
+
+### Multiple Commands
+
+The command configured for running on template rendering must take one of two forms.
+
+The first is as a list of the command and arguments split at spaces. The command can use an absolute path or be found on the execution environment's PATH and must be the first item in the list. This form allows for single or multi-word commands that can be executed directly with a system call.
+
+**Examples**
+
+```hcl
+command = ["echo", "hello"]
+## ...
+command = ["/opt/foo-package/bin/run-foo"]
+## ...
+command = ["foo"]
+```
+
+~> **Note** If you provide a single command without the list denoting square brackets (`[]`), it is converted into a list with a single argument. For example `command = "foo"` gets converted into `command = ["foo"]`
+
+The second form is as a single quoted command using system shell features. This form **requires** a shell named `sh` be on the executable search path (eg. PATH on \*nix). This is the standard on all \*nix systems and should work out of the box on those systems. This won't work on, for example, Docker images with only the executable and without a minimal system like Alpine. Using this form you can join multiple commands with logical operators, `&&` and `||`, use pipelines with `|`, conditionals, etc. Note that the shell `sh` is normally `/bin/sh` on \*nix systems and is either a POSIX shell or a shell running in POSIX compatible mode, so it is best to stick to POSIX shell syntax in this command.
+
+**Examples**
+
+```hcl
+command = "/opt/foo && /opt/bar"
+##...
+command = "if /opt/foo ; then /opt/bar ; fi"
+```
+
+Using this method you can run as many shell commands as you need with whatever logic you need. Though it is suggested that if it gets too long you might want to wrap it in a shell script, deploy and run that.
+
+### Shell Commands and Exec Mode
+
+Using a system shell based command has one additional caveat when used for the Exec mode process (the managed, executed process to which Consul Template propagates signals). For signals to work correctly, not only must anything the shell runs handle signals, but the shell itself must handle them. You need to manage this yourself, as shells exit upon receiving most signals.
+
+A common example configures the `SIGHUP` signal to trigger a reload of the underlying process and to be ignored by the shell process. There are two options:
+
+- Use `trap` to ignore the signal.
+
+- Use `exec` to replace the shell with another process.
+
+To use `trap` to ignore the signal, you call `trap` to catch the signal in the shell with no action.
+For example if you have an underlying nginx process and you want to run it with a shell command and have the shell ignore the HUP signals, you can use the following command:
+
+```hcl
+command = "trap '' HUP; /usr/sbin/nginx -c /etc/nginx/nginx.conf"
+```
+
+The `trap '' HUP;` bit is enough to get the shell to ignore the HUP signal. If you left off the `trap` command nginx would reload but the shell command would exit but leave the nginx still running, not unmanaged.
+
+Alternatively using `exec` will replace the shell's process with a sub-process, keeping the same PID and process grouping (allowing the sub-process to be managed). This is simpler, but a bit less flexible than `trap`, and looks like the following:
+
+```hcl
+command = "exec /usr/sbin/nginx -c /etc/nginx/nginx.conf"
+```
+
+Here the nginx process replaces the enclosing shell process to be managed by consul-template, receiving the signals directly. Basically, `exec` eliminates the shell from the equation.
+
+Refer to your shell's documentation on `trap` and `exec` for more specific details.
+
+
diff --git a/website/content/docs/automate/consul-template/plugins.mdx b/website/content/docs/automate/consul-template/plugins.mdx
new file mode 100644
index 000000000000..dee13ea862a2
--- /dev/null
+++ b/website/content/docs/automate/consul-template/plugins.mdx
@@ -0,0 +1,92 @@
+---
+layout: docs
+page_title: Execute custom scripts with Consul Template plugins
+description: >-
+ Consul Template is a tool available as a distinct binary that enables dynamic application configuration and secrets rotation for Consul deployments based on Go templates.
+---
+
+# Execute custom scripts with Consul Template plugins
+
+For some use cases, it may be necessary to write a plugin that offloads work to another system. This is especially useful for things that may not fit in the "standard library" of Consul Template, but still need to be shared across multiple instances.
+
+Consul Template plugins must have the following API:
+
+
+
+```shell-session
+$ NAME [INPUT...]
+```
+
+
+
+- `NAME` - the name of the plugin - this is also the name of the binary, either a full path or just the program name. It will be executed in a shell with the inherited `PATH` so e.g. the plugin `cat` will run the first executable `cat` that is found on the `PATH`.
+
+- `INPUT` - input from the template. There will be one INPUT for every argument passed to the `plugin` function. If the arguments contain whitespace, that whitespace will be passed as if the argument were quoted by the shell.
+
+## Important notes
+
+- Plugins execute user-provided scripts and pass in potentially sensitive data from Consul or Vault. Nothing is validated or protected by Consul Template, so all necessary precautions and considerations should be made by template authors.
+
+- Plugin output must be returned as a string on `stdout`. Only `stdout` will be parsed for output. Be sure to log all errors and debugging messages onto `stderr` to avoid errors when Consul Template returns the value. Note that output to `stderr` will only be output if the plugin returns a non-zero exit code.
+
+- Always `exit 0` or Consul Template will assume the plugin failed to execute.
+
+- Ensure the empty input case is handled correctly (see [Multi-phase execution](https://github.com/hashicorp/consul-template/blob/main/README.md#multi-phase-execution))
+
+- Data piped into the plugin is appended after any parameters given explicitly. For example, `{{ "sample-data" | plugin "my-plugin" "some-parameter" }}` will call `my-plugin some-parameter sample-data`.
+
+## Examples
+
+The following example plugin removes any JSON keys that start with an underscore and returns the JSON string:
+
+
+
+
+```go
+func main() {
+ arg := []byte(os.Args[1])
+
+ var parsed map[string]interface{}
+ if err := json.Unmarshal(arg, &parsed); err != nil {
+ fmt.Fprintln(os.Stderr, fmt.Sprintf("err: %s", err))
+ os.Exit(1)
+ }
+
+ for k, _ := range parsed {
+ if string(k[0]) == "_" {
+ delete(parsed, k)
+ }
+ }
+
+ result, err := json.Marshal(parsed)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, fmt.Sprintf("err: %s", err))
+ os.Exit(1)
+ }
+
+ fmt.Fprintln(os.Stdout, fmt.Sprintf("%s", result))
+ os.Exit(0)
+}
+```
+
+
+
+
+```ruby
+#! /usr/bin/env ruby
+require "json"
+
+if ARGV.empty?
+ puts JSON.fast_generate({})
+ Kernel.exit(0)
+end
+
+hash = JSON.parse(ARGV.first)
+hash.reject! { |k, _| k.start_with?("_") }
+puts JSON.fast_generate(hash)
+Kernel.exit(0)
+```
+
+
+
+
diff --git a/website/content/docs/automate/consul-template/render.mdx b/website/content/docs/automate/consul-template/render.mdx
new file mode 100644
index 000000000000..4e781808bff7
--- /dev/null
+++ b/website/content/docs/automate/consul-template/render.mdx
@@ -0,0 +1,52 @@
+---
+layout: docs
+page_title: Render templates
+description: >-
+ Consul Template is a tool available as a distinct binary that enables dynamic application configuration and secrets rotation for Consul deployments based on Go templates.
+---
+
+# Render templates
+
+This page describes the process to render templates, including common integrations on the command line.
+
+Render the template `/tmp/template.ctmpl` to `/tmp/result` on disk:
+
+```shell-session
+$ consul-template \
+ -template "/tmp/template.ctmpl:/tmp/result"
+```
+
+Render multiple templates in the same process. The optional third argument to the template is a command that will execute each time the template changes.
+
+```shell-session
+$ consul-template \
+ -template "/tmp/nginx.ctmpl:/var/nginx/nginx.conf:nginx -s reload" \
+ -template "/tmp/redis.ctmpl:/var/redis/redis.conf:service redis restart" \
+ -template "/tmp/haproxy.ctmpl:/var/haproxy/haproxy.conf"
+```
+
+Render a template using a custom Consul and Vault address:
+
+```shell-session
+$ consul-template \
+ -consul-addr "10.4.4.6:8500" \
+ -vault-addr "https://10.5.32.5:8200" \
+ -template "/tmp/template.ctmpl:/tmp/result"
+```
+
+Render all templates and then spawn and monitor a child process as a supervisor:
+
+```shell-session
+$ consul-template \
+ -template "/tmp/in.ctmpl:/tmp/result" \
+ -exec "/sbin/my-server"
+```
+
+For more information on supervising, refer to the [Consul Template Exec Mode documentation](/consul/docs/automate/consul-template/mode#exec-mode).
+
+Instruct Consul Template to use a configuration file with the `-config` flag:
+
+```shell
+$ consul-template -config "/my/config.hcl"
+```
+
diff --git a/website/content/docs/automate/consul-template/vault/gossip.mdx b/website/content/docs/automate/consul-template/vault/gossip.mdx
new file mode 100644
index 000000000000..dd8f1c68acb3
--- /dev/null
+++ b/website/content/docs/automate/consul-template/vault/gossip.mdx
@@ -0,0 +1,287 @@
+---
+layout: docs
+page_title: Generate and manage gossip encryption for Consul with Vault and Consul Template
+description: >-
+ Use Vault's secure secrets management and Consul Template to create and manage gossip key rotation for your Consul datacenter.
+---
+
+# Generate and manage gossip encryption for Consul with Vault and Consul Template
+
+This page describes the process to use HashiCorp Vault and Consul Template to automate the process to create and manage gossip encryption keys for your Consul datacenter.
+
+## Overview
+
+To configure your Consul datacenter for production use, one of the necessary steps is to enable gossip encryption for all the agents in the datacenter. The process is explained in detail in the [Manage gossip encryption](/consul/docs/secure/encryption/gossip/enable) documentation.
+
+Once the gossip communication is secured with a symmetric key, our recommended best practice is to define a policy for rotating the gossip keys based on a defined interval that meets your needs. You can review how to perform this process manually in the [Rotate Gossip Encryption Keys in Consul](/consul/docs/secure/encryption/gossip/rotate/vm) documentation.
+
+In this guide, you will integrate Consul with HashiCorp's Vault and Consul Template to securely store and rotate your encryption key. The process includes the following steps:
+
+1. Create the gossip encryption key.
+1. Connect to a Vault instance.
+1. Initialize a key value store in Vault.
+1. Store and retrieve the encryption key from Vault.
+1. Configure Consul Template to retrieve the key and then use a script to rotate it.
+1. Start Consul Template.
+
+-> The guide provides an example custom script to automate the encryption key rotation. You can use the script as a starting point to create your own rotation automation.
+
+## Prerequisites
+
+- **Consul:** to complete this guide, you need a Consul datacenter configured with gossip encryption enabled. Follow [Deploy Consul on VMs](/consul/tutorials/get-started-vms/virtual-machine-gs-deploy) to learn how to deploy a Consul datacenter with gossip encryption enabled.
+
+- **Vault:** this guide assumes you have a running Vault cluster in your network. You can use a [local Vault dev server](/vault/tutorials/get-started/setup#set-up-the-lab) or an existing Vault deployment.
+
+- **Consul Template:** to interact with your Consul agent you will need to [install the `consul-template` binary](/consul/docs/automate/consul-template/install) on a node. To rotate gossip keys you need the binary to be installed on one node only; changes will be automatically propagated across the Consul datacenter.
+
+The diagram below shows the minimal architecture needed to demonstrate the functionality.
+
+
+
+## Generate an encryption key
+
+You can use Consul's `consul keygen` command to generate the encryption key.
+
+```shell-session
+$ consul keygen | tee encryption.key
+
+T6kFttAkS3oSCS/nvlK8ONmfESmtKhCpRA2pc20RBcA=
+```
+
+## Configure Vault
+
+Vault provides a `kv` secrets engine that can be used to store arbitrary secrets. You will use this engine to store the encryption key.
+
+Before you can initialize the secrets engine, you need to set the `VAULT_ADDR` and `VAULT_TOKEN` environment variables so that you can connect to your local instance of Vault.
+
+```shell-session
+$ export VAULT_ADDR='http://127.0.0.1:8200'
+```
+
+The `VAULT_ADDR` environment variable should be set to the target Vault server address you want to connect to.
+
+```shell-session
+$ export VAULT_TOKEN="root"
+```
+
+The `VAULT_TOKEN` environment variable should store your client token (e.g. `root`).
+
+### Initialize the Vault secrets engine
+
+Enable key/value v2 secrets engine (`kv-v2`).
+
+```shell-session
+$ vault secrets enable kv-v2
+
+Success! Enabled the kv-v2 secrets engine at: kv-v2/
+```
+
+Once the secret engine is enabled, verify it is functioning properly using the following command.
+
+```shell-session
+$ vault secrets list
+
+Path Type Accessor Description
+---- ---- -------- -----------
+...
+kv-v2/ kv kv_5e0867f7 n/a
+secret/ kv kv_b5027aee key/value secret storage
+...
+```
+
+### Store the encryption key in Vault
+
+With the secret engine correctly initialized, you can store the gossip encryption key in it.
+
+```shell-session
+$ vault kv put kv-v2/consul/config/encryption key=$(cat encryption.key) ttl=1h
+
+Success! Data written to: kv-v2/consul/config/encryption
+```
+
+
+
+ Vault's KV secrets engine does not enforce TTLs for expiration. Instead, the `lease_duration` value can be used as a hint to consumers for how often they should check back for a new value. You can change the `ttl` value or not use it, however, if you want to integrate with Consul Template, you must define a TTL so that Consul Template can know when to check for new versions of the key.
+
+
+
+
+
+ In this tutorial, the TTL for the encryption key is being set to 1 hour, meaning that the key will be valid only for 1 hour before expiring. You can try using a shorter TTL in a test environment to ensure keys are revoked properly after the TTL has expired.
+
+
+
+### Retrieve the gossip encryption key from Vault
+
+Once the key is stored in Vault, you can retrieve it from any machine that has access to Vault.
+
+From your Consul server node, use the `vault kv get` command with the `-field` parameter to retrieve the key value only.
+
+```shell-session
+$ vault kv get -field=key kv-v2/consul/config/encryption | tee encryption.key
+
+T6kFttAkS3oSCS/nvlK8ONmfESmtKhCpRA2pc20RBcA=
+```
+
+Once you have retrieved the encryption key from Vault, you can use it to configure your new Consul datacenter or to rotate the key in an existing datacenter with gossip encryption enabled.
+
+The process of key rotation should be automated when possible, and the next section will show you how to use Consul Template to automate the process.
+
+## Rotate the gossip encryption key with Consul Template
+
+You can use Consul Template in your Consul datacenter to integrate with Vault's KV secrets engine and dynamically rotate Consul's gossip encryption keys.
+
+
+### Create a template file
+
+Create a Go template for Consul Template to retrieve the key from Vault.
+
+In this example, you will place these templates under `/opt/consul/templates`.
+
+```shell-session
+$ mkdir -p /opt/consul/templates
+```
+
+Create a file named `gossip.key.tpl` under `/opt/consul/templates` with the following content.
+
+
+
+```go
+{{ with secret "kv-v2/data/consul/config/encryption" }}
+{{ .Data.data.key}}
+{{ end }}
+```
+
+
+
+The template will interact with Vault using the `kv-v2/data/consul/config/encryption` path and will only retrieve the `key` value for the secret at that path.
+
+### Create the Consul Template configuration
+
+Write a configuration file for Consul Template that uses the template you created. The configuration will execute the template and render a file locally on your machine.
+
+Create a file named `consul_template.hcl` under `/opt/consul/templates` with the following content.
+
+
+
+```hcl
+# This denotes the start of the configuration section for Vault. All values
+# contained in this section pertain to Vault.
+vault {
+ # This is the address of the Vault leader. The protocol (http(s)) portion
+ # of the address is required.
+ address = "http://localhost:8200"
+
+ # This value can also be specified via the environment variable VAULT_TOKEN.
+ token = "root"
+
+ unwrap_token = false
+
+ renew_token = false
+}
+
+# This block defines the configuration for a template. Unlike other blocks,
+# this block may be specified multiple times to configure multiple templates.
+template {
+ # This is the source file on disk to use as the input template. This is often
+ # called the "consul-template template".
+ source = "/opt/consul/templates/gossip.key.tpl"
+
+ # This is the destination path on disk where the source template will render.
+ # If the parent directories do not exist, consul-template will attempt to
+ # create them, unless create_dest_dirs is false.
+ destination = "/opt/consul/gossip/gossip.key"
+
+ # This is the permission to render the file. If this option is left
+ # unspecified, consul-template will attempt to match the permissions of the
+ # file that already exists at the destination path. If no file exists at that
+ # path, the permissions are 0644.
+ perms = 0700
+
+ # This is the optional command to run when the template is rendered. The
+ # command will only run if the resulting template changes.
+ command = "/opt/rotate_key.sh"
+}
+```
+
+
+
+### Write a rotation script
+
+The last line of the configuration file refers to a script, located at `/opt/rotate_key.sh`, that will run every time a new key is retrieved from Vault.
+
+You can use the following example to create your own rotation script.
+
+
+
+```shell
+#!/usr/bin/env bash
+
+# Setup Consul address info
+export CONSUL_HTTP_ADDR="http://localhost:8500"
+
+# The new key will be in a file generated by consul-template
+# the script retrieves the key from the file
+NEW_KEY=`cat /opt/consul/gossip/gossip.key | sed -e '/^$/d'`
+
+# Install the key
+consul keyring -install ${NEW_KEY}
+
+# Set as primary
+consul keyring -use ${NEW_KEY}
+
+# Retrieve all keys used by Consul
+KEYS=`curl -s ${CONSUL_HTTP_ADDR}/v1/operator/keyring`
+
+ALL_KEYS=`echo ${KEYS} | jq -r '.[].Keys| to_entries[].key' | sort | uniq`
+
+for i in `echo ${ALL_KEYS}`; do
+ if [ $i != ${NEW_KEY} ] ; then
+ consul keyring -remove $i
+ fi
+done
+```
+
+
+
+### Start Consul Template
+
+Start Consul Template using the `-config` parameter to provide the configuration file.
+
+```shell-session
+$ consul-template -config "consul_template.hcl"
+```
+
+The command will start Consul Template as a long running daemon and it will keep listening for changes on Vault.
+
+## Rotate the Consul encryption key
+
+Once you have started the process, every time you update the key value in Vault, Consul Template will make sure that the new key is installed in Consul too.
+
+You can now use the `vault kv put` command to change the encryption key.
+
+```shell-session
+$ vault kv put kv-v2/consul/config/encryption key=$(consul keygen) ttl=1s
+```
+
+The script will pick up the `gossip.key` file containing the new key and use it to rotate the Consul gossip encryption key.
+
+It should output the following lines.
+
+```plaintext hideClipboard
+==> Installing new gossip encryption key...
+==> Changing primary gossip encryption key...
+==> Removing gossip encryption key...
+```
+
+You can test the key is actually changed in Consul using the `consul keyring` command:
+
+```shell-session
+$ consul keyring -list
+
+==> Gathering installed encryption keys...
+WAN:
+ ROfcQ/QLUgvBpIsWCCY9MtNqIyV7r3SS5eJmNZ6vUEA= [1/1]
+dc1 (LAN):
+ ROfcQ/QLUgvBpIsWCCY9MtNqIyV7r3SS5eJmNZ6vUEA= [1/1]
+```
diff --git a/website/content/docs/automate/consul-template/vault/mtls.mdx b/website/content/docs/automate/consul-template/vault/mtls.mdx
new file mode 100644
index 000000000000..f6327757e042
--- /dev/null
+++ b/website/content/docs/automate/consul-template/vault/mtls.mdx
@@ -0,0 +1,602 @@
+---
+layout: docs
+page_title: Generate mTLS Certificates for Consul with Vault and Consul Template
+description: >-
+ Use Vault and Consul Template to create and configure Vault-managed mTLS certificates for Consul's API and RPC traffic.
+---
+
+# Generate mTLS Certificates for Consul with Vault and Consul Template
+
+This page describes the process to use Vault's [PKI Secrets Engine](/vault/docs/secrets/pki) to generate and renew dynamic X.509 certificates, using [Consul Template](/consul/docs/automate/consul-template) to rotate your certificates.
+
+This method enables each agent in the Consul datacenter to have a unique certificate, with a relatively short time-to-live (TTL), that is automatically rotated, which allows you to safely and securely scale your datacenter while using mutual TLS (mTLS).
+
+## Prerequisites
+
+- **Consul:** to complete this guide, you need at least a node to install a Consul server agent and ideally another node to install a Consul client agent. Follow [Deploy Consul on VMs](/consul/tutorials/get-started-vms/virtual-machine-gs-deploy) to learn how to deploy a Consul agent. This page will provide you with the necessary specific configuration to apply for the scenario.
+
+- **Vault:** this guide assumes you have a running Vault cluster in your network. You can use a [local Vault dev server](/vault/tutorials/get-started/setup#set-up-the-lab) or an existing Vault deployment.
+
+- **Consul Template:** to interact with your Consul agent you will need to [install the `consul-template` binary](/consul/docs/automate/consul-template/install) on a node. To rotate certificates, the binary must be installed on every node whose certificates you want Consul Template to manage.
+
+The diagram below shows the minimal architecture needed to demonstrate the functionality.
+
+
+
+## Configure Vault's PKI secrets engine
+
+Before you can initialize the secrets engine, you need to set the `VAULT_ADDR` and `VAULT_TOKEN` environment variables so that you can connect to your local instance of Vault.
+
+The `VAULT_ADDR` environment variable should be set to the target Vault server address you want to connect to.
+
+```shell-session
+$ export VAULT_ADDR='http://127.0.0.1:8200'
+```
+
+The `VAULT_TOKEN` environment variable should store your client token (e.g. `root`).
+
+```shell-session
+$ export VAULT_TOKEN="root"
+```
+
+Enable Vault's PKI secrets engine at the `pki` path.
+
+```shell-session
+$ vault secrets enable pki
+
+Success! Enabled the pki secrets engine at: pki/
+```
+
+Tune the PKI secrets engine to issue certificates with a maximum time-to-live (TTL) of `87600` hours.
+
+```shell-session
+$ vault secrets tune -max-lease-ttl=87600h pki
+
+Success! Tuned the secrets engine at: pki/
+```
+
+
+
+This tutorial uses a common and recommended pattern which is to have one mount act as the root CA and to use this CA only to sign intermediate CA CSRs from other PKI secrets engines (which you will create in the next few steps). For tighter security, you can store your CA outside of Vault and use the PKI engine only as an intermediate CA.
+
+
+
+## Configure Vault as Consul's CA
+
+Consul requires that all servers and clients have key pairs that are generated by a single Certificate Authority (CA).
+
+You will use Vault's PKI secrets engine to generate the necessary CA and certificates.
+
+#### 1. Generate the root CA
+
+Generate the root certificate and save the certificate in `CA_cert.crt`.
+
+```shell-session
+$ vault write -field=certificate pki/root/generate/internal \
+ common_name="dc1.consul" \
+ ttl=87600h > CA_cert.crt
+```
+
+This generates a new self-signed CA certificate and private key. Vault will automatically revoke the generated root at the end of its lease period (TTL); the CA certificate will sign its own Certificate Revocation List (CRL).
+
+
+
+You can adapt the TTL to comply with your internal policies on certificate lifecycle.
+
+
+
+You can inspect the certificate created using `openssl x509 -text -noout -in CA_cert.crt`
+
+Configure the CA and CRL URLs.
+
+```shell-session
+$ vault write pki/config/urls \
+ issuing_certificates="http://127.0.0.1:8200/v1/pki/ca" \
+ crl_distribution_points="http://127.0.0.1:8200/v1/pki/crl"
+```
+
+Example output:
+
+
+
+```plaintext
+Success! Data written to: pki/config/urls
+```
+
+
+
+#### 2. Generate an intermediate CA
+
+Enable the PKI secrets engine at the `pki_int` path.
+
+```shell-session
+$ vault secrets enable -path=pki_int pki
+
+Success! Enabled the pki secrets engine at: pki_int/
+```
+
+Tune the `pki_int` secrets engine to issue certificates with a maximum time-to-live (TTL) of `43800` hours.
+
+```shell-session
+$ vault secrets tune -max-lease-ttl=43800h pki_int
+
+Success! Tuned the secrets engine at: pki_int/
+```
+
+
+
+You can adapt the TTL to comply with your internal policies on certificate lifecycle.
+
+
+
+Request an intermediate certificate signing request (CSR) and save request as `pki_intermediate.csr`.
+
+```shell-session
+$ vault write -format=json pki_int/intermediate/generate/internal \
+ common_name="dc1.consul Intermediate Authority" \
+ | jq -r '.data.csr' > pki_intermediate.csr
+```
+
+The command has no output.
+
+#### 3. Sign the CSR and import the certificate into Vault
+
+```shell-session
+$ vault write -format=json pki/root/sign-intermediate csr=@pki_intermediate.csr \
+ format=pem_bundle ttl="43800h" \
+ | jq -r '.data.certificate' > intermediate.cert.pem
+```
+
+The command has no output.
+
+Once the CSR is signed, and the root CA returns a certificate, it can be imported back into Vault.
+
+```shell-session
+$ vault write pki_int/intermediate/set-signed certificate=@intermediate.cert.pem
+
+Success! Data written to: pki_int/intermediate/set-signed
+```
+
+#### 4. Create a Vault role
+
+A role is a logical name that maps to a policy used to generate credentials.
+
+```shell-session
+$ vault write pki_int/roles/consul-dc1 \
+ allowed_domains="dc1.consul" \
+ allow_subdomains=true \
+ generate_lease=true \
+ max_ttl="720h"
+```
+
+Example output:
+
+
+
+```plaintext
+Success! Data written to: pki_int/roles/consul-dc1
+```
+
+
+
+For this guide, you are using the following options for the role:
+
+- `allowed_domains`: Specifies the domains of the role. The command uses `dc1.consul` as the domain, which is the default configuration you are going to use for Consul.
+- `allow_subdomains`: Specifies if clients can request certificates with CNs that are subdomains of the CNs allowed by the other role options
+
+
+
+ This includes wildcard subdomains.
+
+
+
+- `generate_lease`: Specifies if certificates issued/signed against this role will have Vault leases attached to them. Certificates can be added to the CRL by `vault revoke <lease_id>` when certificates are associated with leases.
+
+This completes the Vault configuration as a CA.
+
+## Generate a server certificate
+
+You can test the `pki` engine is configured correctly by generating your first certificate.
+
+```shell-session
+$ vault write pki_int/issue/consul-dc1 \
+ common_name="server.dc1.consul" \
+ ttl="24h" | tee certs.txt
+```
+
+
+
+The TTL for the certificate is being set to 24 hours in this guide, meaning that this certificate will be valid only for 24 hours before expiring. You can try using a shorter TTL on a test environment to ensure certificates are revoked properly after TTL is expired.
+
+
+
+Example output:
+
+
+
+```plaintext
+Key Value
+--- -----
+lease_id pki_int/issue/consul-dc1/lFfKfpxtM0xY0AHDlr9pJ2GM
+lease_duration 23h59m59s
+lease_renewable false
+ca_chain [-----BEGIN CERTIFICATE-----
+##...
+-----END CERTIFICATE-----]
+certificate -----BEGIN CERTIFICATE-----
+##...
+-----END CERTIFICATE-----
+expiration 1599645187
+issuing_ca -----BEGIN CERTIFICATE-----
+##...
+-----END CERTIFICATE-----
+private_key -----BEGIN RSA PRIVATE KEY-----
+##...
+-----END RSA PRIVATE KEY-----
+private_key_type rsa
+serial_number 3f:ec:bd:ea:01:a6:35:49:a7:6d:17:ba:13:88:c1:b8:35:b4:fc:4c
+```
+
+
+
+## Configure Consul
+
+
+
+
+Configure Consul TLS using the following configuration:
+
+
+
+```hcl
+tls {
+ defaults {
+ verify_incoming = true
+ verify_outgoing = true
+ verify_server_hostname = true
+ ca_file = "/opt/consul/agent-certs/ca.crt"
+ cert_file = "/opt/consul/agent-certs/agent.crt"
+ key_file = "/opt/consul/agent-certs/agent.key"
+ }
+}
+
+auto_encrypt {
+ allow_tls = true
+}
+```
+
+```json
+{
+ "tls": {
+ "defaults": {
+ "verify_incoming": true,
+ "verify_outgoing": true,
+ "verify_server_hostname": true,
+ "ca_file": "/opt/consul/agent-certs/ca.crt",
+ "cert_file": "/opt/consul/agent-certs/agent.crt",
+ "key_file": "/opt/consul/agent-certs/agent.key"
+ }
+ },
+ "auto_encrypt": {
+ "allow_tls": true
+ }
+}
+```
+
+
+
+
+To configure TLS encryption for Consul servers, three files are required:
+
+- `ca_file` - CA (or intermediate) certificate to verify the identity of the other nodes.
+- `cert_file` - Consul agent public certificate
+- `key_file` - Consul agent private key
+
+For the first Consul startup, you will use the certificate generated earlier.
+
+Use the following commands to extract the two certificates and private key from the `certs.txt` and place them into the right file and location.
+
+Create the certificates folder.
+
+```shell-session
+$ mkdir -p /opt/consul/agent-certs
+```
+
+Extract the root CA certificate.
+
+```shell-session
+$ grep -Pzo "(?s)(?<=issuing_ca)[^\-]*.*?END CERTIFICATE[^\n]*\n" certs.txt | sed 's/^\s*-/-/g' > /opt/consul/agent-certs/ca.crt
+```
+
+Extract the agent certificate.
+
+```shell-session
+$ grep -Pzo "(?s)(?<=certificate)[^\-]*.*?END CERTIFICATE[^\n]*\n" certs.txt | sed 's/^\s*-/-/g' > /opt/consul/agent-certs/agent.crt
+```
+
+Extract the agent key.
+
+```shell-session
+$ grep -Pzo "(?s)(?<=private_key)[^\-]*.*?END RSA PRIVATE KEY[^\n]*\n" certs.txt | sed 's/^\s*-/-/g' > /opt/consul/agent-certs/agent.key
+```
+
+
+
+
+
+
+This section describes the automated [client certificate deployment process](/consul/docs/reference/agent/configuration-file/encryption#auto_encrypt) available in Consul 1.5.2 and newer.
+
+
+
+With auto-encryption, you can configure the Consul servers to automatically distribute certificates to the clients. To use this feature, you will need to configure clients to automatically get the certificates from the server.
+
+Configure Consul client TLS using the following configuration:
+
+
+
+```hcl
+tls {
+ defaults {
+ verify_incoming = true
+ verify_outgoing = true
+ verify_server_hostname = true
+ ca_file = "/opt/consul/agent-certs/ca.crt"
+ }
+}
+
+auto_encrypt {
+ tls = true
+}
+```
+
+```json
+{
+ "tls": {
+ "defaults": {
+ "verify_incoming": true,
+ "verify_outgoing": true,
+ "verify_server_hostname": true,
+      "ca_file": "/opt/consul/agent-certs/ca.crt"
+ }
+ },
+ "auto_encrypt": {
+ "tls": true
+ }
+}
+```
+
+
+
+
+To configure TLS encryption for Consul clients only one file is required:
+
+- `ca_file` - CA (or intermediate) certificate to verify the identity of the other nodes.
+
+Use the following commands to extract the certificate from the `certs.txt` and place them into the right file and location.
+
+Create the certificates folder.
+
+```shell-session
+$ mkdir -p /opt/consul/agent-certs
+```
+
+Extract the root CA certificate.
+
+```shell-session
+$ grep -Pzo "(?s)(?<=issuing_ca)[^\-]*.*?END CERTIFICATE[^\n]*\n" certs.txt | sed 's/^\s*-/-/g' > /opt/consul/agent-certs/ca.crt
+```
+
+
+
+
+## Configure Consul Template
+
+The guide steps used `ttl="24h"` as a parameter during certificate creation, meaning that these certificates will be valid for only 24 hours before expiring.
+
+Deciding the right trade-off for certificate lifespan is always a compromise between security and agility. A possible third way that does not require you to lower your security is to use Consul Template to automate certificate renewal for Consul when the TTL is expired.
+
+### Create template files
+
+You can instruct Consul Template to generate and retrieve those files from Vault using the following templates:
+
+
+
+```go
+{{ with secret "pki_int/issue/consul-dc1" "common_name=server.dc1.consul" "ttl=24h" "alt_names=localhost" "ip_sans=127.0.0.1"}}
+{{ .Data.certificate }}
+{{ end }}
+```
+
+
+
+The template uses the `pki_int/issue/consul-dc1` endpoint that Vault exposes to generate new certificates. It also mentions the common name and alternate names for the certificate.
+
+
+
+```go
+{{ with secret "pki_int/issue/consul-dc1" "common_name=server.dc1.consul" "ttl=24h" "alt_names=localhost" "ip_sans=127.0.0.1"}}
+{{ .Data.private_key }}
+{{ end }}
+```
+
+
+
+The same endpoint also exposes the CA certificate under the `.Data.issuing_ca` parameter.
+
+
+
+```go
+{{ with secret "pki_int/issue/consul-dc1" "common_name=server.dc1.consul" "ttl=24h"}}
+{{ .Data.issuing_ca }}
+{{ end }}
+```
+
+
+
+
+Copy the newly created files into `/opt/consul/templates`.
+
+```shell-session
+$ cp *.tpl /opt/consul/templates/
+```
+
+### Create Consul Template configuration
+
+Create a configuration file, `consul_template.hcl`, that will instruct Consul Template to retrieve the files needed for the Consul agents, client and server, to configure TLS encryption.
+
+
+
+
+
+
+```hcl
+# This denotes the start of the configuration section for Vault. All values
+# contained in this section pertain to Vault.
+vault {
+ # This is the address of the Vault leader. The protocol (http(s)) portion
+ # of the address is required.
+ address = "http://localhost:8200"
+
+ # This value can also be specified via the environment variable VAULT_TOKEN.
+ token = "root"
+
+ unwrap_token = false
+
+ renew_token = false
+}
+
+# This block defines the configuration for a template. Unlike other blocks,
+# this block may be specified multiple times to configure multiple templates.
+template {
+ # This is the source file on disk to use as the input template. This is often
+ # called the "consul-template template".
+ source = "agent.crt.tpl"
+
+ # This is the destination path on disk where the source template will render.
+ # If the parent directories do not exist, consul-template will attempt to
+ # create them, unless create_dest_dirs is false.
+ destination = "/opt/consul/agent-certs/agent.crt"
+
+ # This is the permission to render the file. If this option is left
+ # unspecified, consul-template will attempt to match the permissions of the
+ # file that already exists at the destination path. If no file exists at that
+ # path, the permissions are 0644.
+ perms = 0700
+
+ # This is the optional command to run when the template is rendered. The
+ # command will only run if the resulting template changes.
+ command = "sh -c 'date && consul reload'"
+}
+
+template {
+ source = "agent.key.tpl"
+ destination = "/opt/consul/agent-certs/agent.key"
+ perms = 0700
+ command = "sh -c 'date && consul reload'"
+}
+
+template {
+ source = "ca.crt.tpl"
+ destination = "/opt/consul/agent-certs/ca.crt"
+ command = "sh -c 'date && consul reload'"
+}
+```
+
+
+
+The configuration file for the server contains the information to retrieve the
+CA certificate as well as the certificate/key pair for the server agent.
+
+
+
+
+
+
+
+```hcl
+# This denotes the start of the configuration section for Vault. All values
+# contained in this section pertain to Vault.
+vault {
+ # This is the address of the Vault leader. The protocol (http(s)) portion
+ # of the address is required.
+ address = "http://localhost:8200"
+
+ # This value can also be specified via the environment variable VAULT_TOKEN.
+ token = "root"
+
+ unwrap_token = false
+
+ renew_token = false
+}
+
+template {
+ source = "ca.crt.tpl"
+ destination = "/opt/consul/agent-certs/ca.crt"
+ command = "sh -c 'date && consul reload'"
+}
+```
+
+
+
+The configuration file for the client contains the information to retrieve the CA certificate only; the certificates for client agents are automatically
+generated from Consul when using the `auto_encrypt` setting.
+
+
+
+
+
+To allow Consul Template to communicate with Vault, define the following parameters:
+
+- `address` : the address of your Vault server. If Vault runs on the same node as Consul, you can use `http://localhost:8200`.
+
+- `token` : a valid Vault ACL token with appropriate permissions. You can use Vault root token for this example.
+
+
+
+The use of Vault root token is not recommended for production use; the recommended security approach is to create a new token based on a specific policy with limited privileges.
+
+
+
+### Start Consul Template
+
+Start Consul Template using the `-config` parameter to provide the configuration file.
+
+```shell-session
+$ consul-template -config "consul_template.hcl"
+
+Configuration reload triggered
+```
+
+## Verify certificate rotation
+
+The certificate you created manually for the Consul server had a TTL of 24 hours.
+
+This means that after the certificate expires, Vault will renew it, and Consul Template will update the files on your agent and reload the Consul configuration automatically to make it pick up the new files.
+
+You can verify the rotation by checking that Consul Template keeps listing, every 24 hours, a timestamp and the log line:
+
+```plaintext hideClipboard
+Configuration reload triggered
+```
+
+You can also use `openssl` to verify the certificate content:
+
+```shell-session
+$ openssl x509 -text -noout -in /opt/consul/agent-certs/agent.crt
+
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number:
+ 1b:2d:d6:5d:63:9b:aa:05:84:7b:be:3b:6f:e1:95:bb:1c:36:8c:a4
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: CN=dc1.consul Intermediate Authority
+ Validity
+ Not Before: Sep 16 16:03:45 2020 GMT
+ Not After : Sep 16 16:06:15 2020 GMT
+ Subject: CN=server.dc1.consul
+...
+```
+
+and verify that the `Not Before` and `Not After` values are being updated to reflect the new certificate.
diff --git a/website/content/docs/automate/index.mdx b/website/content/docs/automate/index.mdx
new file mode 100644
index 000000000000..a55ce8aec0b4
--- /dev/null
+++ b/website/content/docs/automate/index.mdx
@@ -0,0 +1,44 @@
+---
+layout: docs
+page_title: Dynamically configure applications
+description: >-
+ This topic provides an overview of deployment strategies that use Consul's key/value (KV) store to dynamically update applications and Consul configurations in response to deployment changes.
+---
+
+# Dynamically configure applications
+
+This topic provides an overview for dynamically configuring applications when Consul detects certain changes in your network. Many of these operations rely on Consul's key/value (KV) store.
+
+For information about automatically deploying infrastructure when Consul detects failed health checks or increased network traffic, refer to [Consul-Terraform-Sync](/consul/docs/automate/infrastructure).
+
+## Introduction
+
+Platform operators managing a Consul deployment at scale require automated processes for generating and distributing updated configurations dynamically as applications change. You can use Consul's KV store to store data for generating Consul agent configurations, and set up the agent to invoke custom scripts in response to changes.
+
+## Key/value store
+
+@include 'text/descriptions/kv/store.mdx'
+
+## Sessions
+
+@include 'text/descriptions/kv/session.mdx'
+
+## Watches
+
+@include 'text/descriptions/kv/watch.mdx'
+
+## Consul template
+
+@include 'text/descriptions/consul-template.mdx'
+
+## Network infrastructure automation
+
+@include 'text/descriptions/network-infrastructure-automation.mdx'
+
+## Guidance
+
+@include 'text/guidance/automate.mdx'
+
+### Constraints, limitations, and troubleshooting
+
+@include 'text/limitations/kv.mdx'
\ No newline at end of file
diff --git a/website/content/docs/automate/infrastructure/configure.mdx b/website/content/docs/automate/infrastructure/configure.mdx
new file mode 100644
index 000000000000..ad95ddbb78ec
--- /dev/null
+++ b/website/content/docs/automate/infrastructure/configure.mdx
@@ -0,0 +1,107 @@
+---
+layout: docs
+page_title: Configure Consul-Terraform-Sync
+description: >-
+ A high level guide to configure Consul-Terraform-Sync.
+---
+
+# Configure Consul-Terraform-Sync
+
+This page covers the main components for configuring your Network Infrastructure Automation with Consul at a high level. For the full list of configuration options, visit the [Consul-Terraform-Sync (CTS) configuration page](/consul/docs/reference/cts).
+
+## Tasks
+
+A task captures a network automation process by defining which network resources to update on a given condition. Configure CTS with one or more tasks that contain a list of Consul services, a Terraform module, and various Terraform providers.
+
+Within the [`task` block](/consul/docs/nia/configuration#task), the list of services for a task represents the service layer that drives network automation. The `module` is the discovery location of the Terraform module that defines the network automation process for the task. The `condition`, not shown below, defaults to the services condition when unconfigured such that network resources are updated on changes to the list of services over time.
+
+Review the Terraform module to be used for network automation and identify the Terraform providers required by the module. If the module depends on a set of providers, include the list of provider names in the `providers` field to associate the corresponding provider configuration with the task. These providers will need to be configured later in a separate block.
+
+```hcl
+task {
+ name = "website-x"
+ description = "automate services for website-x"
+ module = "namespace/example/module"
+ version = "1.0.0"
+ providers = ["myprovider"]
+ condition "services" {
+ names = ["web", "api"]
+ }
+}
+```
+
+## Terraform Providers
+
+Configuring Terraform providers within CTS requires two configuration components. The first component is required within the [`driver.terraform` block](/consul/docs/nia/configuration#terraform-driver). All providers configured for CTS must be listed within the `required_providers` stanza to satisfy a [Terraform v0.13+ requirement](/terraform/language/providers/requirements#requiring-providers) for Terraform to discover and install them. The providers listed are later organized by CTS to be included in the appropriate Terraform configuration files for each task.
+
+```hcl
+driver "terraform" {
+ required_providers {
+ myprovider = {
+ source = "namespace/myprovider"
+ version = "1.3.0"
+ }
+ }
+}
+```
+
+The second component for configuring a provider is the [`terraform_provider` block](/consul/docs/nia/configuration#terraform-provider). This block resembles [provider blocks for Terraform configuration](/terraform/language/providers/configuration) and has the same responsibility for understanding API interactions and exposing resources for a specific infrastructure platform.
+
+Terraform modules configured for task automation may require configuring the referenced providers. For example, configuring the host address and authentication to interface with your network infrastructure. Refer to the Terraform provider documentation hosted on the [Terraform Registry](https://registry.terraform.io/browse/providers) to find available options. The `terraform_provider` block is loaded by CTS during runtime and processed to be included in [autogenerated Terraform configuration files](/consul/docs/nia/network-drivers#provider) used for task automation. Omitting the `terraform_provider` block for a provider will defer to the Terraform behavior assuming an empty default configuration.
+
+```hcl
+terraform_provider "myprovider" {
+ address = "myprovider.example.com"
+}
+```
+
+## Summary
+
+Piecing it all together, the configuration file for CTS will have several HCL blocks in addition to other options for configuring the CTS daemon: `task`, `driver.terraform`, and `terraform_provider` blocks.
+
+An example HCL configuration file is shown below to automate one task to execute a Terraform module on the condition when there are changes to two services.
+
+
+
+```hcl
+log_level = "info"
+
+syslog {
+ enabled = true
+}
+
+consul {
+ address = "consul.example.com"
+}
+
+task {
+ name = "website-x"
+ description = "automate services for website-x"
+ module = "namespace/example/module"
+ version = "1.0.0"
+ providers = ["myprovider"]
+ condition "services" {
+ names = ["web", "api"]
+ }
+ buffer_period {
+ min = "10s"
+ }
+}
+
+driver "terraform" {
+ log = true
+
+ required_providers {
+ myprovider = {
+ source = "namespace/myprovider"
+ version = "1.3.0"
+ }
+ }
+}
+
+terraform_provider "myprovider" {
+ address = "myprovider.example.com"
+}
+```
+
+
\ No newline at end of file
diff --git a/website/content/docs/automate/infrastructure/high-availability.mdx b/website/content/docs/automate/infrastructure/high-availability.mdx
new file mode 100644
index 000000000000..1fdfcdafe7c9
--- /dev/null
+++ b/website/content/docs/automate/infrastructure/high-availability.mdx
@@ -0,0 +1,183 @@
+---
+layout: docs
+page_title: Run Consul-Terraform-Sync with high availability
+description: >-
+ Improve network automation resiliency by enabling high availability for Consul-Terraform-Sync. HA enables persistent task and event data so that CTS functions as expected during a failover event.
+---
+
+# Run Consul-Terraform-Sync with high availability
+
+
+ An enterprise license is only required for enterprise distributions of Consul-Terraform-Sync (CTS).
+
+
+This topic describes how to run Consul-Terraform-Sync (CTS) configured for high availability. High availability is an enterprise capability that ensures that all changes to Consul that occur during a failover transition are processed and that CTS continues to operate as expected.
+
+## Introduction
+
+A network always has exactly one instance of the CTS cluster that is the designated leader. The leader is responsible for monitoring and running tasks. If the leader fails, CTS triggers the following process when it is configured for high availability:
+
+1. The CTS cluster promotes a new leader from the pool of followers in the network.
+1. The new leader begins running all existing tasks in `once-mode` in order to process changes that occurred during the failover transition period. In this mode, CTS runs all existing tasks one time.
+1. The new leader logs any errors that occur during `once-mode` operation and the new leader continues to monitor Consul for changes.
+
+In a standard configuration, CTS exits if errors occur when the CTS instance runs tasks in `once-mode`. In a high availability configuration, CTS logs the errors and continues to operate without interruption.
+
+The following diagram shows operating state when high availability is enabled. CTS Instance A is the current leader and is responsible for monitoring and running tasks:
+
+
+
+The following diagram shows the CTS cluster state after the leader stops. CTS Instance B becomes the leader responsible for monitoring and running tasks.
+
+
+
+### Failover details
+
+- The time it takes for a new leader to be elected is determined by the `high_availability.cluster.storage.session_ttl` configuration. The minimum failover time is equal to the `session_ttl` value. The maximum failover time is double the `session_ttl` value.
+- If failover occurs during task execution, a new leader is elected. The new leader will attempt to run all tasks once before continuing to monitor for changes.
+- If using the [HCP Terraform driver](/consul/docs/nia/network-drivers/terraform-cloud), the task finishes and CTS starts a new leader that attempts to queue a run for each task in HCP Terraform in once-mode.
+- If using [Terraform driver](/consul/docs/automate/infrastructure/network-driver/terraform), the task may complete depending on the cause of the failover. The new leader starts and attempts to run each task in [once-mode](/consul/docs/nia/cli/start#modes). Depending on the module and provider, the task may require manual intervention to fix any inconsistencies between the infrastructure and Terraform state.
+- If failover occurs when no task is executing, CTS elects a new leader that attempts to run all tasks in once-mode.
+
+Note that driver behavior is consistent whether or not CTS is running in high availability mode.
+
+## Requirements
+
+Verify that you have met the [basic requirements](/consul/docs/automate/infrastructure/requirements) for running CTS.
+
+* CTS Enterprise 0.7 or later
+* Terraform CLI 0.13 or later
+* All instances in a cluster must be in the same datacenter.
+
+You must configure appropriate ACL permissions for your cluster. Refer to [ACL permissions](#acl-permissions) for details.
+
+We recommend specifying the [HCP Terraform driver](/consul/docs/nia/network-drivers/terraform-cloud) in your CTS configuration if you want to run in high availability mode.
+
+## Configuration
+
+Add the `high_availability` block in your CTS configuration and configure the required settings to enable high availability. Refer to the [Configuration reference](/consul/docs/nia/configuration#high-availability) for details about the configuration fields for the `high_availability` block.
+
+The following example configures high availability functionality for a cluster named `cts-cluster`:
+
+
+
+```hcl
+high_availability {
+ cluster {
+ name = "cts-cluster"
+ storage "consul" {
+ parent_path = "cts"
+ namespace = "ns"
+ session_ttl = "30s"
+ }
+ }
+
+ instance {
+ address = "cts-01.example.com"
+ }
+}
+```
+
+
+### ACL permissions
+
+The `session` and `keys` resources in your Consul environment must have `write` permissions. Refer to the [ACL documentation](/consul/docs/secure/acl) for details on how to define ACL policies.
+
+If the `high_availability.cluster.storage.namespace` field is configured, then your ACL policy must also enable `write` permissions for the `namespace` resource.
+
+## Start a new CTS cluster
+
+We recommend deploying a cluster of three CTS instances so that the cluster has one leader and two followers.
+
+1. Create an HCL configuration file that includes the settings you want to include, including the `high_availability` block. Refer to [Configuration Options for Consul-Terraform-Sync](/consul/docs/reference/cts) for all configuration options.
+1. Issue the startup command and pass the configuration file. Refer to the [`start` command reference](/consul/docs/nia/cli/start#modes) for additional information about CTS startup modes.
+ ```shell-session
+ $ consul-terraform-sync start -config-file ha-config.hcl
+ ```
+1. You can call the `/status` API endpoint to verify the status of tasks CTS is configured to monitor. Only the leader of the cluster will return a successful response. Refer to the [`/status` API reference documentation](/consul/docs/reference/cts/api/status) for information about usage and responses.
+
+ ```shell-session
+   $ curl localhost:8558/status/tasks
+ ```
+
+Repeat the procedure to start the remaining instances for your cluster. We recommend using near-identical configurations for all instances in your cluster. You may not be able to use exact configurations in all cases, but starting instances with the same configuration improves consistency and reduces confusion if you need to troubleshoot errors.
+
+## Modify an instance configuration
+
+You can implement a rolling update to update a non-task configuration for a CTS instance, such as the Consul connection settings. If you need to update a task in the instance configuration, refer to [Modify tasks](#modify-tasks).
+
+1. Identify the leader CTS instance by either making a call to the [`status/cluster` API endpoint](/consul/docs/nia/api/status#cluster-status) or by checking the logs for the following entry:
+ ```shell-session
+   [INFO] ha: acquired leadership lock: id=<instance-id>
+ ```
+1. Stop one of the follower CTS instances and apply the new configuration.
+1. Restart the follower instance.
+1. Repeat steps 2 and 3 for other follower instances in your cluster.
+1. Stop the leader instance. One of the follower instances becomes the leader.
+1. Apply the new configuration to the former leader instance and restart it.
+
+## Modify tasks
+
+When high availability is enabled, CTS persists task and event data. Refer to [State storage and persistence](/consul/docs/nia/architecture#state-storage-and-persistence) for additional information.
+
+You can use the following methods for modifying tasks when high availability is enabled. We recommend choosing a single method to make all task configuration changes because inconsistencies between the state and the configuration can occur when mixing methods.
+
+### Delete and recreate the task
+
+We recommend deleting and recreating a task if you need to make a modification. Use the CTS API to identify the CTS leader instance and replace a task.
+
+1. Identify the leader CTS instance by either making a call to the [`status/cluster` API endpoint](/consul/docs/nia/api/status#cluster-status) or by checking the logs for the following entry:
+
+ ```shell-session
+   [INFO] ha: acquired leadership lock: id=<instance-id>
+ ```
+1. Send a `DELETE` call to the [`/task/` endpoint](/consul/docs/nia/api/tasks#delete-task) to delete the task. In the following example, the leader instance is at `localhost:8558`:
+
+ ```shell-session
+ $ curl --request DELETE localhost:8558/v1/tasks/task_a
+ ```
+
+ You can also use the [`task delete` command](/consul/docs/nia/cli/task#task-delete) to complete this step.
+
+1. Send a `POST` call to the `/task/` endpoint and include the updated task in your payload.
+ ```shell-session
+   $ curl --header "Content-Type: application/json" \
+ --request POST \
+ --data @payload.json \
+ localhost:8558/v1/tasks
+ ```
+
+ You can also use the [`task-create` command](/consul/docs/nia/cli/task#task-create) to complete this step.
+
+### Discard data with the `-reset-storage` flag
+
+You can restart the CTS cluster using the [`-reset-storage` flag](/consul/docs/nia/cli/start#options) to discard persisted data if you need to update a task.
+
+1. Stop a follower instance.
+1. Update the instance’s task configuration.
+1. Restart the instance and include the `-reset-storage` flag.
+1. Stop all other instances so that the updated instance becomes the leader.
+1. Start all other instances again.
+1. Restart the instance you restarted in step 3 without the `-reset-storage` flag so that it starts up with the current state. If you continue to run an instance with the `-reset-storage` flag enabled, then CTS will reset the state data whenever the instance becomes the leader.
+
+## Troubleshooting
+
+Use the following troubleshooting procedure if a previous leader had been running a task successfully but the new leader logs an error after a failover:
+
+1. Check the logs printed to the console for errors. Refer to the [`syslog` configuration](/consul/docs/nia/configuration#syslog) for information on how to locate the logs. In the following example output, CTS reported a `401: Bad credentials` error:
+ ```shell-session
+ 2022-08-23T09:25:09.501-0700 [ERROR] tasksmanager: error applying task: task_name=config-task
+ error=
+ | error tf-apply for 'config-task': exit status 1
+ |
+ | Error: GET https://api.github.com/user: 401 Bad credentials []
+ |
+ | with module.config-task.provider["registry.terraform.io/integrations/github"],
+ | on .terraform/modules/config-task/main.tf line 11, in provider "github":
+ | 11: provider "github" {
+ |
+ ```
+1. Check for differences between the previous leader and new leader, such as differences in configurations, environment variables, and local resources.
+1. Start a new instance with the fix that resolves the issue.
+1. Tear down the leader instance that has the issue and any other instances that may have the same issue.
+1. Restart the affected instances to implement the fix.
\ No newline at end of file
diff --git a/website/content/docs/automate/infrastructure/index.mdx b/website/content/docs/automate/infrastructure/index.mdx
new file mode 100644
index 000000000000..ca7537e44cc9
--- /dev/null
+++ b/website/content/docs/automate/infrastructure/index.mdx
@@ -0,0 +1,76 @@
+---
+layout: docs
+page_title: Network infrastructure automation
+description: >-
+ Network infrastructure automation (NIA) is the concept of dynamically updating infrastructure devices triggered by service changes. Consul-Terraform-Sync is a tool that performs NIA and utilizes Consul as a data source that contains networking information about services and monitors those services. Terraform is used as the underlying automation tool and leverages the Terraform provider ecosystem to drive relevant changes to the network infrastructure.
+---
+
+# Network infrastructure automation
+
+Network infrastructure automation (NIA) enables dynamic updates to network infrastructure devices triggered by service changes. Consul-Terraform-Sync (CTS) utilizes Consul as a data source that contains networking information about services and monitors those services. Terraform is used as the underlying automation tool and leverages the Terraform provider ecosystem to drive relevant changes to the network infrastructure.
+
+CTS executes one or more automation tasks with the most recent service variable values from the Consul service catalog. Each task consists of a runbook automation written as a CTS compatible Terraform module using resources and data sources for the underlying network infrastructure. The `consul-terraform-sync` daemon runs on the same node as a Consul agent.
+
+CTS is available as an open source and enterprise distribution. Follow the [Automate your network configuration with Consul-Terraform-Sync tutorial](/consul/tutorials/network-infrastructure-automation/consul-terraform-sync-intro?utm_source=docs) to get started with CTS OSS or read more about [CTS Enterprise](/consul/docs/enterprise/cts).
+
+## Use Cases
+
+**Application teams must wait for manual changes in the network to release, scale up/down and re-deploy their applications.** This creates a bottleneck, especially in frequent workflows related to scaling up/down the application, breaking the DevOps goal of self-service enablement. CTS automates this process, thus decreasing the possibility of human error in manually editing configuration files, as well as decreasing the overall time taken to push out configuration changes.
+
+**Networking and security teams cannot scale processes to the speed and changes needed.** Manual approaches don't scale well, causing backlogs in network and security teams. Even in organizations that have some amount of automation (such as scripting), there is a need for an accurate, real-time source of data to trigger and drive their network automation workflows. CTS runs in near real-time to keep up with the rate of change.
+
+## Glossary
+
+- `Condition` - A task-level defined environmental requirement. When a task's condition is met, CTS executes that task to update network infrastructure. Depending on the condition type, the condition definition may also define and enable the module input that the task provides to the configured Terraform Module.
+
+- `Consul objects` - Consul objects are the response request objects returned from the Consul API that CTS monitors for changes. Examples of Consul objects can be service instance information, Consul key-value pairs, and service registration. The Consul objects are used to inform a task's condition and/or module input.
+
+- `Consul-Terraform-Sync (CTS)` - [GitHub repo](https://github.com/hashicorp/consul-terraform-sync) and binary/CLI name for the project that is used to perform Network Infrastructure Automation.
+
+- `Dynamic Tasks` - A dynamic task is a type of task that is dynamically triggered on a change to any relevant Consul catalog values e.g. service instances, Consul KV, catalog-services. See scheduled tasks for a type of non-dynamic task.
+
+ -> **Note:** The terminology "tasks" used throughout the documentation refers to all types of tasks except when specifically stated otherwise.
+
+- `Network Drivers` - CTS uses [network drivers](/consul/docs/automate/infrastructure/network-driver) to execute and update network infrastructure. Drivers transform Consul service-level information into downstream changes by processing and abstracting API and resource details tied to specific network infrastructure.
+
+- `Network Infrastructure Automation (NIA)` - Enables dynamic updates to network infrastructure devices triggered when specific conditions, such as service changes and registration, are met.
+
+- `Scheduled Tasks` - A scheduled task is a type of task that is triggered only on a schedule. It is configured with a [schedule condition](/consul/docs/nia/configuration#schedule-condition).
+
+- `Services` - A service in CTS represents a service that is registered with Consul for service discovery. Services are grouped by their service names. There may be more than one instance of a particular service, each with its own unique ID. CTS monitors services based on service names and can provide service instance details to a Terraform module for network automation.
+
+- `Module Input` - A module input defines objects that provide values or metadata to the Terraform module. See [module input](/consul/docs/nia/terraform-modules#module-input) for the supported metadata and values. For example, a user can configure a Consul KV module input to provide KV pairs as variables to their respective Terraform Module.
+
+ The module input can be configured in a couple of ways:
+ - Setting the `condition` block's `use_as_module_input` field to true
+ - Field was previously named `source_includes_var` (deprecated)
+ - Configuring `module_input` block(s)
+ - Block was previously named `source_input` (deprecated)
+
+ ~> "Module input" was renamed from "source input" in CTS 0.5.0 due to updates to the configuration names seen above.
+
+ -> **Note:** The terminology "tasks" used throughout the documentation refers to all types of tasks except when specifically stated otherwise.
+
+- `Tasks` - A task is the translation of dynamic service information from the Consul Catalog into network infrastructure changes downstream.
+
+- `HCP Terraform` - Per the [Terraform documentation](/terraform/cloud-docs), "HCP Terraform" describes both HCP Terraform and Terraform Enterprise, which are different distributions of the same application. Documentation will apply to both distributions unless specifically stated otherwise.
+
+- `Terraform Module` - A [Terraform module](/terraform/language/modules) is a container for multiple Terraform resources that are used together.
+
+- `Terraform Provider` - A [Terraform provider](/terraform/language/providers) is responsible for understanding API interactions and exposing resources for an infrastructure type.
+
+## Getting Started With Network Infrastructure Automation
+
+The [Network Infrastructure Automation (NIA)](/consul/tutorials/network-infrastructure-automation?utm_source=docs)
+collection contains examples on how to configure CTS to
+perform Network Infrastructure Automation. The collection also contains a
+tutorial to secure your CTS configuration for a production
+environment and one to help you build your own CTS-compatible
+module.
+
+## Community
+
+- [Contribute](https://github.com/hashicorp/consul-terraform-sync) to the open source project
+- [Report](https://github.com/hashicorp/consul-terraform-sync/issues) bugs or request enhancements
+- [Discuss](https://discuss.hashicorp.com/tags/c/consul/29/consul-terraform-sync) with the community or ask questions
+- [Build integrations](/consul/docs/automate/infrastructure/module) for CTS
\ No newline at end of file
diff --git a/website/content/docs/automate/infrastructure/install.mdx b/website/content/docs/automate/infrastructure/install.mdx
new file mode 100644
index 000000000000..0fbecedf675a
--- /dev/null
+++ b/website/content/docs/automate/infrastructure/install.mdx
@@ -0,0 +1,124 @@
+---
+layout: docs
+page_title: Install Consul and Consul-Terraform-Sync
+description: >-
+ Consul-Terraform-Sync is a daemon that runs alongside Consul. Consul-Terraform-Sync is not included with the Consul binary and will need to be installed separately.
+---
+
+# Install Consul-Terraform-Sync
+
+Refer to the [introduction](/consul/tutorials/network-infrastructure-automation/consul-terraform-sync-intro?utm_source=docs) tutorial for details about installing, configuring, and running Consul-Terraform-Sync (CTS) on your local machine with the Terraform driver.
+
+## Install Consul-Terraform-Sync
+
+
+
+
+To install CTS, find the [appropriate package](https://releases.hashicorp.com/consul-terraform-sync/) for your system and download it as a zip archive. For the CTS Enterprise binary, download a zip archive with the `+ent` metadata. [CTS Enterprise requires a Consul Enterprise license](/consul/docs/enterprise/license/cts) to run.
+
+Unzip the package to extract the binary named `consul-terraform-sync`. Move the `consul-terraform-sync` binary to a location available on your `PATH`.
+
+Example:
+
+```shell-session
+$ echo $PATH
+/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
+$ mv ./consul-terraform-sync /usr/local/bin/consul-terraform-sync
+```
+
+Once installed, verify the installation works by running the binary with the `-version` or `-help` option. The version output for the CTS Enterprise binary includes the `+ent` metadata.
+
+```shell-session
+$ consul-terraform-sync -version
+```
+
+
+
+
+Install and run CTS as a [Docker container](https://hub.docker.com/r/hashicorp/consul-terraform-sync).
+
+For the CTS Enterprise, use the Docker image [`hashicorp/consul-terraform-sync-enterprise`](https://hub.docker.com/r/hashicorp/consul-terraform-sync-enterprise).
+
+```shell-session
+$ docker pull hashicorp/consul-terraform-sync
+```
+
+Once installed, verify the installation works by running the image with the `-version` or `-help` option. The version output for the CTS Enterprise image includes the `+ent` metadata.
+
+```shell-session
+$ docker run --rm hashicorp/consul-terraform-sync -version
+```
+
+
+
+
+The CTS OSS binary is available in the HashiCorp tap, which is a repository of all our Homebrew packages.
+
+```shell-session
+$ brew tap hashicorp/tap
+$ brew install hashicorp/tap/consul-terraform-sync
+```
+
+Run the following command to update to the latest version:
+
+```shell-session
+$ brew upgrade hashicorp/tap/consul-terraform-sync
+```
+
+Once installed, verify the installation works by running the binary with the `-version` or `-help` option.
+
+```shell-session
+$ consul-terraform-sync -version
+```
+
+
+
+
+Clone the repository from GitHub [`hashicorp/consul-terraform-sync`](https://github.com/hashicorp/consul-terraform-sync) to build and install the CTS OSS binary in your path `$GOPATH/bin`. Building from source requires `git` and [Golang](https://go.dev/).
+
+```shell-session
+$ git clone https://github.com/hashicorp/consul-terraform-sync.git
+$ cd consul-terraform-sync
+$ git checkout tags/<version>
+$ go install
+```
+
+Once installed, verify the installation works by running the binary with the `-version` or `-help` option.
+
+```shell-session
+$ consul-terraform-sync -version
+```
+
+
+
+
+## Connect your Consul Cluster
+
+CTS connects with your Consul cluster in order to monitor the Consul catalog for service changes. These service changes lead to downstream updates to your network devices. You can configure your Consul cluster in CTS with the [Consul block](/consul/docs/nia/configuration#consul). Below is an example:
+
+```hcl
+consul {
+ address = "localhost:8500"
+ token = "my-consul-acl-token"
+}
+```
+
+## Connect your Network Device
+
+CTS interacts with your network device through a network driver. For the Terraform network driver, CTS uses Terraform providers to make changes to your network infrastructure resources. You can reference existing provider docs on the Terraform Registry to configure each provider or create a new Terraform provider.
+
+Once you have identified a Terraform provider for all of your network devices, you can configure them in CTS with a [`terraform_provider` block](/consul/docs/nia/configuration#terraform-provider) for each network device. Below is an example:
+
+```hcl
+terraform_provider "fake-firewall" {
+ address = "10.10.10.10"
+ username = "admin"
+ password = "password123"
+}
+```
+
+This provider is then used by task(s) to execute a Terraform module that will update the related network device.
+
+### Multiple Instances per Provider
+
+You might have multiple instances of the same type of network device; for example, multiple instances of a firewall or load balancer. You can configure each instance with its own provider block and distinguish it by the `alias` meta-argument. See [multiple provider configurations](/consul/docs/nia/configuration#multiple-provider-configurations) for more details and an example of the configuration.
\ No newline at end of file
diff --git a/website/content/docs/automate/infrastructure/module.mdx b/website/content/docs/automate/infrastructure/module.mdx
new file mode 100644
index 000000000000..60340d4ecb02
--- /dev/null
+++ b/website/content/docs/automate/infrastructure/module.mdx
@@ -0,0 +1,384 @@
+---
+layout: docs
+page_title: Compatible Terraform Modules for NIA
+description: >-
+  Consul-Terraform-Sync automates execution of Terraform modules for network infrastructure automation.
+---
+
+# Compatible Terraform Modules for Network Infrastructure Automation
+
+Consul-Terraform-Sync (CTS) automates execution of Terraform modules through tasks. A task is a construct in CTS that defines the automation of Terraform and the module.
+
+## Module Specifications
+
+Compatible modules for CTS follow the [standard module](/terraform/language/modules/develop#module-structure) structure. Modules can use syntax supported by Terraform version 0.13 and newer.
+
+### Compatibility Requirements
+
+Below are the two required elements for module compatibility with CTS:
+
+1. **Root module** - Terraform has one requirement for files in the root directory of the repository to function as the primary entrypoint for the module. It should encapsulate the core logic to be used by CTS for task automation. `main.tf` is the recommended filename for the main file where resources are created.
+2. [**`services` input variable**](#services-variable) - CTS requires all modules to have the following input variable declared within the root module. The declaration of the `services` variable can be included at the top of the suggested `variables.tf` file where other input variables are commonly declared. This variable functions as the response object from the Consul catalog API and surfaces network information to be consumed by the module. It is structured as a map of objects.
+
+### Optional Input Variables
+
+In addition to the required `services` input variable, CTS provides additional, optional input variables to be used within your module. Support for an optional input variable requires two changes:
+
+1. Updating the Terraform Module to declare the input variable in the suggested `variables.tf`
+1. Adding configuration to the CTS task block to define the module input values that should be provided to the input variables
+
+See below sections for more information on [defining module input](#module-input) and [declaring optional input variables](#how-to-create-a-compatible-terraform-module) in your Terraform module.
+
+### Module Input ((#source-input))
+
+A task monitors [Consul objects](/consul/docs/nia#consul-objects) that are defined by the task's configuration. The Consul objects can be used for the module input that satisfies the requirements defined by the task's Terraform module's [input variables](/terraform/language/values/variables).
+
+A task's module input is slightly different from the task's condition, even though both monitor defined objects. The task's condition monitors defined objects with a configured criteria. When this criteria is satisfied, the task will trigger.
+
+The module input, however, monitors defined objects with the intent of providing values or metadata about these objects to the Terraform module. The monitored module input and condition objects can be the same object, such as a task configured with a `condition "services"` block and `use_as_module_input` set to `true`. The module input and condition can also be different objects and configured separately, such as a task configured with a `condition "catalog-services"` and `module_input "consul-kv"` block. As a result, the monitored module input is decoupled from the provided condition in order to satisfy the Terraform module.
+
+Each type of object that CTS monitors can only be defined through one configuration within a task definition. For example, if a task monitors services, the task cannot have both `condition "services"` and `module_input "services"` configured. See [Task Module Input configuration](/consul/docs/nia/configuration#task-module-input) for more details.
+
+There are a few ways that a module input can be defined:
+
+- [**`services` list**](/consul/docs/nia/configuration#services) (deprecated) - The list of services to use as module input.
+- **`condition` block's `use_as_module_input` field** - When set to true, the condition's objects are used as module input.
+ - Field was previously named `source_includes_var` (deprecated)
+- [**`module_input` blocks**](/consul/docs/nia/configuration#module-input) - This block can be configured multiple times to define objects to use as module input.
+ - Block was previously named `source_input` (deprecated)
+
+Multiple ways of defining a module input adds configuration flexibility, and allows for optional additional input variables to be supported by CTS alongside the `services` input variable.
+
+Additional optional input variable types:
+
+- [**`catalog_services` variable**](#catalog-services-variable)
+- [**`consul_kv` variable**](#consul-kv-variable)
+
+#### Services Module Input ((#services-source-input))
+
+Tasks configured with a services module input monitor for changes to services. Monitoring is either performed on a configured list of services or on any services matching a provided regex.
+
+Sample rendered services input:
+
+
+
+```hcl
+services = {
+ "web.test-server.dc1" = {
+ id = "web"
+ name = "web"
+ kind = ""
+ address = "127.0.0.1"
+ port = 80
+ meta = {}
+ tags = ["example"]
+ namespace = ""
+ status = "passing"
+ node = "pm8902"
+ node_id = "307625d3-a1cf-9e85-ff81-12017ca4d848"
+ node_address = "127.0.0.1"
+ node_datacenter = "dc1"
+ node_tagged_addresses = {
+ lan = "127.0.0.1"
+ lan_ipv4 = "127.0.0.1"
+ wan = "127.0.0.1"
+ wan_ipv4 = "127.0.0.1"
+ }
+ node_meta = {
+ consul-network-segment = ""
+ }
+ },
+}
+```
+
+
+
+In order to configure a task with the services module input, the list of services that will be used for the input must be configured in one of the following ways:
+
+- the task's [`services`](/consul/docs/nia/configuration#services) (deprecated)
+- a [`condition "services"` block](/consul/docs/nia/configuration#services-condition) configured with `use_as_module_input` field set to true
+ - Field was previously named `source_includes_var` (deprecated)
+- a [`module_input "services"` block](/consul/docs/nia/configuration#services-module-input)
+ - Block was previously named `source_input "services"` (deprecated)
+
+The services module input operates by monitoring the [Health List Nodes For Service API](/consul/api-docs/health#list-nodes-for-service) and provides the latest service information to the input variables. A complete list of service information that would be provided to the module is expanded below:
+
+| Attribute | Description |
+| ----------------------- | ------------------------------------------------------------------------------------------------- |
+| `id` | A unique Consul ID for this service. The service id is unique per Consul agent. |
+| `name` | The logical name of the service. Many service instances may share the same logical service name. |
+| `address` | IP address of the service host -- if empty, node address should be used. |
+| `port` | Port number of the service |
+| `meta` | List of user-defined metadata key/value pairs for the service |
+| `tags` | List of tags for the service |
+| `namespace` | Consul Enterprise namespace of the service instance |
+| `status` | Representative status for the service instance based on an aggregate of the list of health checks |
+| `node` | Name of the Consul node on which the service is registered |
+| `node_id` | ID of the node on which the service is registered. |
+| `node_address` | The IP address of the Consul node on which the service is registered. |
+| `node_datacenter` | Data center of the Consul node on which the service is registered. |
+| `node_tagged_addresses` | List of explicit LAN and WAN IP addresses for the agent |
+| `node_meta` | List of user-defined metadata key/value pairs for the node |
+
+Below is an example configuration for a task that will execute on a schedule and provide information about the services matching the `regexp` parameter to the task's module.
+
+```hcl
+task {
+ name = "services_condition_task"
+ description = "execute on changes to services whose name starts with web"
+ providers = ["my-provider"]
+ module = "path/to/services-condition-module"
+ condition "schedule" {
+ cron = "* * * * Mon"
+ }
+ module_input "services" {
+ regexp = "^web.*"
+ }
+}
+```
+
+#### Consul KV Module Input ((#consul-kv-source-input))
+
+Tasks configured with a Consul KV module input monitor Consul KV for changes to KV pairs that satisfy the provided configuration. The Consul KV module input operates by monitoring the [Consul KV API](/consul/api-docs/kv#read-key) and provides these key values to the task's module.
+
+Sample rendered consul KV input:
+
+
+
+```hcl
+consul_kv = {
+ "my-key" = "some value"
+}
+```
+
+
+
+To configure a task with the Consul KV module input, the KVs which will be used for the input must be configured in one of the following ways:
+
+- a [`condition "consul-kv"` block](/consul/docs/nia/configuration#consul-kv-condition) configured with the `use_as_module_input` field set to true.
+ - Field was previously named `source_includes_var` (deprecated)
+- a [`module_input "consul-kv"` block](/consul/docs/nia/configuration#consul-kv-module-input).
+ - Block was previously named `source_input "consul-kv"` (deprecated)
+
+Below is a similar example to the one provided in the [Consul KV Condition](/consul/docs/nia/tasks#consul-kv-condition) section. However, the difference in this example is that instead of triggering based on a change to Consul KV, this task will instead execute on a schedule. Once execution is triggered, Consul KV information is then provided to the task's module.
+
+```hcl
+task {
+ name = "consul_kv_schedule_task"
+ description = "executes on Monday monitoring Consul KV"
+ module = "path/to/consul-kv-module"
+
+ condition "schedule" {
+ cron = "* * * * Mon"
+ }
+
+ module_input "consul-kv" {
+ path = "my-key"
+ recurse = true
+ datacenter = "dc1"
+ namespace = "default"
+ }
+}
+```
+
+#### Catalog Services Module Input ((#catalog-services-source-input))
+
+Tasks configured with a Catalog Services module input monitor service and tag information provided by the [Catalog List Services API](/consul/api-docs/catalog#list-services). The module input is a map of service names to a list of tags.
+
+Sample rendered catalog-services input:
+
+
+
+```hcl
+catalog_services = {
+ "api" = ["prod", "staging"]
+ "consul" = []
+ "web" = ["blue", "green"]
+}
+```
+
+
+
+To configure a task with the Catalog Services module input, the catalog services which will be used for the input must be configured in one of the following ways:
+
+- a [`condition "catalog-services"` block](/consul/docs/nia/configuration#catalog-services-condition) configured with `use_as_module_input` field.
+ - Field was previously named `source_includes_var` (deprecated)
+
+-> **Note:** Currently there is no support for a `module_input "catalog-services"` block.
+
+Example of a catalog-services condition which supports module input through `use_as_module_input`:
+
+```hcl
+task {
+ name = "catalog_services_condition_task"
+ description = "execute on registration/deregistration of services"
+ providers = ["my-provider"]
+ module = "path/to/catalog-services-module"
+ condition "catalog-services" {
+ datacenter = "dc1"
+ namespace = "default"
+ regexp = "web.*"
+ use_as_module_input = true
+ node_meta {
+ key = "value"
+ }
+ }
+}
+```
+
+## How to Create a Compatible Terraform Module
+
+You can read more on how to [create a module](/terraform/language/modules/develop) or work through a [tutorial to build a module](/terraform/tutorials/modules/module-create?utm_source=docs). CTS is designed to integrate with any module that satisfies the specifications in the following section.
+
+The repository [hashicorp/consul-terraform-sync-template-module](https://github.com/hashicorp/consul-terraform-sync-template-module) can be cloned and used as a starting point for structuring a compatible Terraform module. The template repository has the files described in the next steps prepared.
+
+First, create a directory to organize Terraform configuration files that make up the module. You can start off with creating two files `main.tf` and `variables.tf` and expand from there based on your module and network infrastructure automation needs.
+
+The `main.tf` is the entry point of the module and this is where you can begin authoring your module. It can contain multiple Terraform resources related to an automation task that uses Consul service discovery information, particularly the required [`services` input variable](#services-variable). The code example below shows a resource using the `services` variable. When this example is used in automation with CTS, the content of the local file would dynamically update as Consul service discovery information changes.
+
+
+
+```hcl
+# Create a file with service names and their node addresses
+resource "local_file" "consul_services" {
+ content = join("\n", [
+ for _, service in var.services : "${service.name} ${service.id} ${service.node_address}"
+ ])
+ filename = "consul_services.txt"
+}
+```
+
+
+
+Something important to consider before authoring your module is deciding the [condition under which it will execute](/consul/docs/nia/tasks#task-execution). This will allow you to potentially use other types of CTS provided input variables in your module. It will also help inform your documentation and how users should configure their task for your module.
+
+### Services Variable
+
+To satisfy the specification requirements for a compatible module, copy the `services` variable declaration to the `variables.tf` file. Your module can optionally have other [variable declarations](#module-input-variables) and [CTS provided input variables](/consul/docs/nia/terraform-modules#optional-input-variables) in addition to `var.services`.
+
+
+
+```hcl
+variable "services" {
+ description = "Consul services monitored by Consul-Terraform-Sync"
+ type = map(
+ object({
+ id = string
+ name = string
+ kind = string
+ address = string
+ port = number
+ meta = map(string)
+ tags = list(string)
+ namespace = string
+ status = string
+
+ node = string
+ node_id = string
+ node_address = string
+ node_datacenter = string
+ node_tagged_addresses = map(string)
+ node_meta = map(string)
+
+ cts_user_defined_meta = map(string)
+ })
+ )
+}
+```
+
+
+
+Keys of the `services` map are unique identifiers of the service across Consul agents and data centers. Keys follow the format `service-id.node.datacenter` (or `service-id.node.namespace.datacenter` for Consul Enterprise). A complete list of attributes available for the `services` variable is included in the [documentation for CTS tasks](/consul/docs/nia/tasks#services-condition).
+
+Terraform variables when passed as module arguments can be [lossy for object types](/terraform/language/expressions/type-constraints#conversion-of-complex-types). This allows CTS to declare the full variable with every object attribute in the generated root module, and pass the variable to a child module that contains a subset of these attributes for its variable declaration. Modules compatible with CTS may simplify the `var.services` declaration within the module by omitting unused attributes. For example, the following services variable has five attributes with the rest omitted.
+
+
+
+```hcl
+variable "services" {
+ description = "Consul services monitored by Consul-Terraform-Sync"
+ type = map(
+ object({
+ id = string
+ name = string
+ node_address = string
+ port = number
+ status = string
+ })
+ )
+}
+```
+
+
+
+### Catalog Services Variable
+
+If you are creating a module for a [catalog-services condition](/consul/docs/nia/tasks#catalog-services-condition), then you have the option to add the `catalog_services` variable, which contains service registration and tag information. If your module would benefit from consuming this information, you can copy the `catalog_services` variable declaration to your `variables.tf` file in addition to the other variables.
+
+
+
+```hcl
+variable "catalog_services" {
+ description = "Consul catalog service names and tags monitored by Consul-Terraform-Sync"
+ type = map(list(string))
+}
+```
+
+
+
+The keys of the `catalog_services` map are the names of the services that are registered with Consul at the given datacenter. The value for each service name is a list of all known tags for that service.
+
+We recommend that if you make a module with a catalog-services condition, you document this in the README. This way, users that want to configure a task with your module will know to configure a catalog-services [condition](/consul/docs/nia/configuration#condition) block.
+
+Similarly, if you use the `catalog_services` variable in your module, we recommend that you also document this usage in the README. Users of your module will then know to set the catalog-services condition [`use_as_module_input`](/consul/docs/nia/configuration#catalog-services-condition) configuration to be true. When this field is set to true, CTS will declare the `catalog_services` variable in the generated root module, and pass the variable to a child module. Therefore, if this field is configured inconsistently, CTS will error and exit.
+
+### Consul KV Variable
+
+If you are creating a module for a [consul-kv condition](/consul/docs/nia/tasks#consul-kv-condition), then you have the option to add the `consul_kv` variable, which contains a map of the keys and values for the Consul KV pairs. If your module would benefit from consuming this information, you can copy the `consul_kv` variable declaration to your `variables.tf` file in addition to the other variables.
+
+
+
+```hcl
+variable "consul_kv" {
+ description = "Keys and values of the Consul KV pairs monitored by Consul-Terraform-Sync"
+ type = map(string)
+}
+```
+
+
+
+If your module contains the `consul_kv` variable, we recommend documenting the usage in the README file so that users know to set the [`use_as_module_input`](/consul/docs/nia/configuration#consul-kv-condition) configuration to `true` in the `consul-kv` condition. Setting the field to `true` instructs CTS to declare the `consul_kv` variable in the generated root module and pass the variable to a child module. Therefore, if this field is configured inconsistently, CTS will error and exit.
+
+### Module Input Variables
+
+Network infrastructure differs vastly across teams and organizations, and the automation needs of practitioners are unique based on their existing setup. [Input variables](/terraform/language/values/variables) can be used to serve as customization parameters to the module for practitioners.
+
+1. Identify areas in the module where practitioners could tailor the automation to fit their infrastructure.
+2. Declare input variables and insert the use of variables throughout module resources to expose these options to practitioners.
+3. Include descriptions to capture what the variables are and how they are used, and specify [custom validation rules for variables](/terraform/language/values/variables#custom-validation-rules) to provide context to users the expected format and conditions for the variables.
+4. Set reasonable default values for variables that are optional, or omit default values for variables that are required module arguments.
+5. Set the [sensitive argument](/terraform/language/values/variables) for variables that contain secret or sensitive values. When set, Terraform will redact the value from output when Terraform commands are run.
+
+Terraform is an explicit configuration language and requires variables to be declared, typed, and passed explicitly through as module arguments. CTS abstracts this by creating intermediate variables at the root level from the module input. These values are configured by practitioners within the [`task` block](/consul/docs/nia/configuration#variable_files). Value assignments are parsed to interpolate the corresponding variable declaration and are written to the appropriate Terraform files. A few assumptions are made for the intermediate variables: the variables users provide CTS are declared and supported by the module, matching name and type.
+
+### Module Guidelines
+
+This section covers guidelines for authoring compatible CTS modules.
+
+#### Scope
+
+We recommend scoping the module to a few related resources for a provider. Small modules are easier and more flexible for end users to adopt for CTS. It allows them to iteratively combine different modules and use them as building blocks to meet their unique network infrastructure needs.
+
+#### Complexity
+
+Consider authoring modules with low complexity to reduce the run time for Terraform execution. Complex modules that have a large number of dependencies may result in longer runs, which adds delay to the near real time network updates.
+
+#### Providers
+
+The Terraform module must declare which providers it requires within the [`terraform.required_providers` block](/terraform/language/providers/requirements#requiring-providers). We also suggest including a version constraint for the provider to specify which versions the module is compatible with.
+
+Aside from the `required_providers` block, provider configurations should not be included within the sharable module for network integrations. End users will configure the providers through CTS, and CTS will then translate provider configuration to the generated root module appropriately.
+
+#### Documentation
+
+Modules for CTS are Terraform modules and can effectively run independently from the `consul-terraform-sync` daemon and Consul environment. They should be written and designed with Terraform best practices and should be clear to a Terraform user what the module does and how to use it. Module documentation should be named `README` or `README.md`. The description should capture what the module should be used for and the implications of running it in automation with CTS.
\ No newline at end of file
diff --git a/website/content/docs/nia/network-drivers/hcp-terraform.mdx b/website/content/docs/automate/infrastructure/network-driver/hcp-terraform.mdx
similarity index 99%
rename from website/content/docs/nia/network-drivers/hcp-terraform.mdx
rename to website/content/docs/automate/infrastructure/network-driver/hcp-terraform.mdx
index 4deb27ead9cf..05b501aeb919 100644
--- a/website/content/docs/nia/network-drivers/hcp-terraform.mdx
+++ b/website/content/docs/automate/infrastructure/network-driver/hcp-terraform.mdx
@@ -88,7 +88,7 @@ sync-tasks/
- `provider` blocks - The provider blocks generated in the root module resemble the `terraform_provider` blocks from the configuration for CTS. They have identical arguments present and are set from the intermediate variable created per provider.
- `module` block - The module block is where the task's module is called as a [child module](/terraform/language/modules#calling-a-child-module). The child module contains the core logic for automation. Required and optional input variables are passed as arguments to the module.
- `variables.tf` - This file contains three types of variable declarations:
- - `services` input variable (required) determines module compatibility with Consul-Terraform Sync (read more on [compatible Terraform modules](/consul/docs/nia/terraform-modules) for more details).
+ - `services` input variable (required) determines module compatibility with Consul-Terraform Sync (read more on [compatible Terraform modules](/consul/docs/automate/infrastructure/module) for more details).
- Any additional [optional input variables](/consul/docs/nia/terraform-modules#optional-input-variables) provided by CTS that the module may use.
- Various intermediate variables used to configure providers. Intermediate provider variables are interpolated from the provider blocks and arguments configured in the CTS configuration.
- `variables.module.tf` - This file is created if there are [variables configured for the task](/consul/docs/nia/configuration#variable_files) and contains the interpolated variable declarations that match the variables from configuration. These are then used to proxy the configured variables to the module through explicit assignment in the module block.
diff --git a/website/content/docs/automate/infrastructure/network-driver/index.mdx b/website/content/docs/automate/infrastructure/network-driver/index.mdx
new file mode 100644
index 000000000000..5a84ac4c33ff
--- /dev/null
+++ b/website/content/docs/automate/infrastructure/network-driver/index.mdx
@@ -0,0 +1,33 @@
+---
+layout: docs
+page_title: Network Drivers
+description: >-
+ Consul-Terraform-Sync Network Drivers with Terraform and HCP Terraform
+---
+
+# Network Drivers
+
+Consul-Terraform-Sync (CTS) uses network drivers to execute and update network infrastructure. Drivers transform Consul service-level information into downstream changes by processing and abstracting API and resource details tied to specific network infrastructure.
+
+CTS is a HashiCorp solution to Network Infrastructure Automation. It bridges Consul's networking features and Terraform infrastructure management capabilities. The solution seamlessly embeds Terraform as network drivers to manage automation of Terraform modules. This expands the Consul ecosystem and taps into the rich features and community of Terraform and Terraform providers.
+
+The following table highlights some of the additional features Terraform and HCP Terraform offer when used as a network driver for CTS. Visit the [Terraform product page](https://www.hashicorp.com/products/terraform) or [contact our sales team](https://www.hashicorp.com/contact-sales) for a comprehensive list of features.
+
+| Network Driver | Description | Features |
+| -------------- | ----------- | -------- |
+| [Terraform driver](/consul/docs/automate/infrastructure/network-driver/terraform) | CTS automates a local installation of the [Terraform CLI](https://www.terraform.io/) | - Local Terraform execution - Local workspace directories - [Backend options](/consul/docs/nia/configuration#backend) available for state storage |
+| [HCP Terraform driver](/consul/docs/nia/network-drivers/terraform-cloud) | CTS Enterprise automates remote workspaces on [HCP Terraform](/terraform/cloud-docs) | - [Remote Terraform execution](/terraform/cloud-docs/run/remote-operations) - Concurrent runs - [Secured variables](/terraform/cloud-docs/workspaces/variables) - [State versions](/terraform/cloud-docs/workspaces/state) - [Sentinel](/terraform/cloud-docs/policy-enforcement) to enforce governance policies as code - Audit [logs](/terraform/enterprise/admin/infrastructure/logging) and [trails](/terraform/cloud-docs/api-docs/audit-trails) - Run [history](/terraform/cloud-docs/run/manage), [triggers](/terraform/cloud-docs/workspaces/settings/run-triggers), and [notifications](/terraform/cloud-docs/workspaces/settings/notifications) - [Terraform Cloud Agents](/terraform/cloud-docs/agents) |
+
+## Understanding Terraform Automation
+
+CTS automates Terraform execution using a templated configuration to carry out infrastructure changes. The auto-generated configuration leverages input variables sourced from Consul and builds on top of reusable Terraform modules published and maintained by HashiCorp partners and the community. CTS can also run your custom built modules that suit your team's specific network automation needs.
+
+The network driver for CTS determines how the Terraform automation operates. Visit the driver pages to read more about the [Terraform driver](/consul/docs/automate/infrastructure/network-driver/terraform) and the [HCP Terraform driver](/consul/docs/nia/network-drivers/terraform-cloud).
+
+### Upgrading Terraform
+
+Upgrading the Terraform version used by CTS may introduce breaking changes that can impact the Terraform modules. Refer to the Terraform [upgrade guides](/terraform/language/upgrade-guides) for details before upgrading.
+
+The following versions were identified as containing changes that may impact Terraform modules.
+
+- [Terraform v0.15](/terraform/language/v1.1.x/upgrade-guides/0-15)
diff --git a/website/content/docs/automate/infrastructure/network-driver/terraform.mdx b/website/content/docs/automate/infrastructure/network-driver/terraform.mdx
new file mode 100644
index 000000000000..57447cf998b1
--- /dev/null
+++ b/website/content/docs/automate/infrastructure/network-driver/terraform.mdx
@@ -0,0 +1,61 @@
+---
+layout: docs
+page_title: Terraform Driver
+description: >-
+ Consul-Terraform-Sync Network Drivers with Terraform
+---
+
+# Terraform Driver
+
+Consul-Terraform-Sync (CTS) extends the Consul ecosystem to include Terraform as an officially supported tooling project. With the Terraform driver, CTS installs the [Terraform CLI](/terraform/downloads) locally and runs Terraform commands based on monitored Consul changes. This page details how the Terraform driver operates using local workspaces and templated files.
+
+## Terraform CLI Automation
+
+On startup, CTS:
+1. Downloads and installs Terraform
+2. Prepares local workspace directories. Terraform configuration and execution for each task is organized as separate [Terraform workspaces](/terraform/language/state/workspaces). The state files for tasks are independent of each other.
+3. Generates Terraform configuration files that make up the root module for each task.
+
+Once all workspaces are set up, CTS monitors the Consul catalog for service changes. When relevant changes are detected, the Terraform driver dynamically updates input variables for that task using a template to render them to a file named [`terraform.tfvars`](/consul/docs/nia/network-drivers#terraform-tfvars). This file is passed as a parameter to the Terraform CLI when executing `terraform plan` and `terraform apply` to update your network infrastructure with the latest Consul service details.
+
+### Local Workspaces
+
+Within the CTS configuration for a task, practitioners can select the desired module to run for the task as well as set the condition to execute the task. Each task executed by the Terraform driver corresponds to an automated root module that calls the selected module in an isolated Terraform environment. CTS will manage concurrent execution of these tasks.
+
+Autogenerated root modules for tasks are maintained in local subdirectories of the CTS working directory. Each subdirectory represents the local workspace for a task. By default, the working directory `sync-tasks` is created in the current directory. To configure where Terraform configuration files are stored, set [`working_dir`](/consul/docs/nia/configuration#working_dir) to the desired path or configure the [`task.working_dir`](/consul/docs/nia/configuration#working_dir-1) individually.
+
+~> **Note:** Although Terraform state files for task workspaces are independent, this does not guarantee the infrastructure changes from concurrent task executions are independent. Ensure that modules across all tasks are not modifying the same resource objects or have overlapping changes that may result in race conditions during automation.
+
+### Root Module
+
+The root module proxies Consul information, configuration, and other variables to the Terraform module for the task. The content of the files that make up the root module are sourced from CTS configuration, information for task's module to use as the automation playbook, and information from Consul such as service information.
+
+A working directory with one task named "cts-example" would have the folder structure below when running with the Terraform driver.
+
+```shell-session
+$ tree sync-tasks/
+
+sync-tasks/
+└── cts-example/
+ ├── main.tf
+ ├── variables.tf
+ ├── terraform.tfvars
+ └── terraform.tfvars.tmpl
+```
+
+The following files of the root module are generated for each task. An [example of a root module created by CTS](https://github.com/hashicorp/consul-terraform-sync/tree/master/examples) can be found in the project repository.
+
+- `main.tf` - The main file contains the terraform block, provider blocks, and a module block calling the module configured for the task.
+ - `terraform` block - The corresponding provider source and versions for the task from the configuration files are placed into this block for the root module. The Terraform backend from the configuration is also templated here.
+ - `provider` blocks - The provider blocks generated in the root module resemble the `terraform_provider` blocks from the configuration for CTS. They have identical arguments present and are set from the intermediate variable created per provider.
+ - `module` block - The module block is where the task's module is called as a [child module](/terraform/language/modules). The child module contains the core logic for automation. Required and optional input variables are passed as arguments to the module.
+- `variables.tf` - This file contains three types of variable declarations.
+ - `services` input variable (required) determines module compatibility with Consul-Terraform Sync (read more on [compatible Terraform modules](/consul/docs/automate/infrastructure/module) for more details).
+ - Any additional [optional input variables](/consul/docs/nia/terraform-modules#optional-input-variables) provided by CTS that the module may use.
+ - Various intermediate variables used to configure providers. Intermediate provider variables are interpolated from the provider blocks and arguments configured in the CTS configuration.
+- `variables.module.tf` - This file is created if there are [variables configured for the task](/consul/docs/nia/configuration#variable_files) and contains the interpolated variable declarations that match the variables from configuration. These are then used to proxy the configured variables to the module through explicit assignment in the module block.
+- `providers.tfvars` - This file is created if there are [providers configured for the task](/consul/docs/nia/configuration#providers) and defined [`terraform_provider` blocks](/consul/docs/nia/configuration#terraform-provider). This file may contain sensitive information. To omit sensitive information from this file, you can [securely configure Terraform providers for CTS](/consul/docs/nia/configuration#securely-configure-terraform-providers) using environment variables or templating.
+- `terraform.tfvars` - The variable definitions file is where the services input variable and any optional CTS input variables are assigned values from Consul. It is periodically updated, typically when the task condition is met, to reflect the current state of Consul.
+- `terraform.tfvars.tmpl` - The template file is used by CTS to template information from Consul by using the HashiCorp configuration and templating library ([hashicorp/hcat](https://github.com/hashicorp/hcat)).
+
+~> **Note:** Generated template and Terraform configuration files are crucial for the automation of tasks. Any manual changes to the files may not be preserved and could be overwritten by a subsequent update. Unexpected manual changes to the format of the files may cause automation to error.
diff --git a/website/content/docs/automate/infrastructure/requirements.mdx b/website/content/docs/automate/infrastructure/requirements.mdx
new file mode 100644
index 000000000000..f10d7ad35b13
--- /dev/null
+++ b/website/content/docs/automate/infrastructure/requirements.mdx
@@ -0,0 +1,136 @@
+---
+layout: docs
+page_title: Requirements
+description: >-
+ Consul-Terraform-Sync requires a Terraform Provider, a Terraform Module, and a running Consul cluster outside of the `consul-terraform-sync` daemon.
+---
+
+# Requirements
+
+The following components are required to run Consul-Terraform-Sync (CTS):
+
+- A Terraform provider
+- A Terraform module
+- A Consul cluster running outside of the `consul-terraform-sync` daemon
+
+You can add support for your network infrastructure through Terraform providers so that you can apply Terraform modules to implement network integrations.
+
+The following guidance is for running CTS using the Terraform driver. The HCP Terraform driver has [additional prerequisites](/consul/docs/nia/network-drivers/terraform-cloud#setting-up-terraform-cloud-driver).
+
+## Run a Consul cluster
+
+Below are several steps towards a minimum Consul setup required for running CTS.
+
+### Install Consul
+
+CTS is a daemon that runs alongside Consul, similar to other Consul ecosystem tools like Consul Template. CTS is not included with the Consul binary and needs to be installed separately.
+
+To install a local Consul agent, refer to the [Getting Started: Install Consul Tutorial](/consul/tutorials/get-started-vms?utm_source=docs).
+
+For information on compatible Consul versions, refer to the [Consul compatibility matrix](/consul/docs/nia/compatibility#consul).
+
+### Run an agent
+
+The Consul agent must be running in order to dynamically update network devices. Refer to the [Consul agent documentation](/consul/docs/fundamentals/agent) for information about configuring and starting a Consul agent.
+
+When running a Consul agent with CTS in production, consider that CTS uses [blocking queries](/consul/api-docs/features/blocking) to monitor task dependencies, such as changes to registered services. This results in multiple long-running TCP connections between CTS and the agent to poll changes for each dependency. Consul may quickly reach the agent connection limits if CTS is monitoring a high number of services.
+
+To avoid reaching the limit prematurely, we recommend using HTTP/2 (requires HTTPS) to communicate between CTS and the Consul agent. When using HTTP/2, CTS establishes a single connection and reuses it for all communication. Refer to the [Consul Configuration section](/consul/docs/nia/configuration#consul) for details.
+
+Alternatively, you can configure the [`limits.http_max_conns_per_client`](/consul/docs/reference/agent/configuration-file/general#http_max_conns_per_client) option to set a maximum number of connections to meet your needs.
+
+### Register services
+
+CTS monitors the Consul catalog for service changes that lead to downstream changes to your network devices. Without services, your CTS daemon is operational but idle. You can register services with your Consul agent by either loading a service definition or by sending an HTTP API request.
+
+The following HTTP API request example registers a service named `web` with your Consul agent:
+
+```shell-session
+$ echo '{
+ "ID": "web",
+ "Name": "web",
+ "Address": "10.10.10.10",
+ "Port": 8000
+}' > payload.json
+
+$ curl --request PUT --data @payload.json http://localhost:8500/v1/agent/service/register
+```
+
+The example represents a non-existent web service running at `10.10.10.10:8000` that is now available for CTS to consume.
+
+You can configure CTS to monitor the web service, execute a task, and update network device(s) by configuring `web` in the [`condition "services"`](/consul/docs/nia/configuration#services-condition) task block. If the web service has any non-default values, it can also be configured in `condition "services"`.
+
+For more details on registering a service using the HTTP API endpoint, refer to the [register service API docs](/consul/api-docs/agent/service#register-service).
+
+For hands-on instructions on registering a service by loading a service definition, refer to the [Getting Started: Register a Service with Consul Service Discovery Tutorial](/consul/tutorials/get-started-vms/virtual-machine-gs-service-discovery).
+
+### Run a cluster
+
+For production environments, we recommend operating a Consul cluster rather than a single agent. Refer to [Getting Started: Deploy a Consul Datacenter Tutorial](/consul/tutorials/get-started-vms/virtual-machine-gs-deploy) for instructions on starting multiple Consul agents and joining them into a cluster.
+
+## Network infrastructure using a Terraform provider
+
+CTS integrations for the Terraform driver use Terraform providers as plugins to interface with specific network infrastructure platforms. The Terraform driver for CTS inherits the expansive collection of Terraform providers to integrate with. You can also specify a provider `source` in the [`required_providers` configuration](/terraform/language/providers/requirements#requiring-providers) to use providers written by the community (requires Terraform 0.13 or later).
+
+### Finding Terraform providers
+
+To find providers for the infrastructure platforms you use, browse the providers section of the [Terraform Registry](https://registry.terraform.io/browse/providers).
+
+### How to create a provider
+
+If a Terraform provider does not exist for your environment, you can create a new Terraform provider and publish it to the registry so that you can use it within a network integration task or create a compatible Terraform module. Refer to the following Terraform tutorial and documentation for additional information on creating and publishing providers:
+
+- [Setup and Implement Read](/terraform/tutorials/providers/provider-setup)
+- [Publishing Providers](/terraform/registry/providers/publishing).
+
+## Network integration using a Terraform module
+
+The Terraform module for a task in CTS is the core component of the integration. It declares which resources to use and how your infrastructure is dynamically updated. The module, along with how it is configured within a task, determines the conditions under which your infrastructure is updated.
+
+Working with a Terraform provider, you can write an integration task for CTS by [creating a Terraform module](/consul/docs/automate/infrastructure/module) that is compatible with the Terraform driver. You can also use a [module built by partners](#partner-terraform-modules).
+
+Refer to [Configuration](/consul/docs/reference/cts) for information about configuring CTS and how to use Terraform providers and modules for tasks.
+
+### Partner Terraform Modules
+
+The modules listed below are available to use and are compatible with CTS.
+
+#### A10 Networks
+
+- Dynamic Load Balancing with Group Member Updates: [Terraform Registry](https://registry.terraform.io/modules/a10networks/service-group-sync-nia/thunder/latest) / [GitHub](https://github.com/a10networks/terraform-thunder-service-group-sync-nia)
+
+#### Avi Networks
+
+- Scale Up and Scale Down Pool and Pool Members (Servers): [GitHub](https://github.com/vmware/terraform-provider-avi/tree/20.1.5/modules/nia/pool)
+
+#### AWS Application Load Balancer (ALB)
+
+- Create Listener Rule and Target Group for an AWS ALB, Forward Traffic to Consul Ingress Gateway: [Terraform Registry](https://registry.terraform.io/modules/aws-quickstart/cts-alb_listener-nia/hashicorp/latest) / [GitHub](https://github.com/aws-quickstart/terraform-hashicorp-cts-alb_listener-nia)
+
+#### Checkpoint
+
+- Dynamic Firewalling with Address Object Updates: [Terraform Registry](https://registry.terraform.io/modules/CheckPointSW/dynobj-nia/checkpoint/latest) / [GitHub](https://github.com/CheckPointSW/terraform-checkpoint-dynobj-nia)
+
+#### Cisco ACI
+
+- Policy Based Redirection: [Terraform Registry](https://registry.terraform.io/modules/CiscoDevNet/autoscaling-nia/aci/latest) / [GitHub](https://github.com/CiscoDevNet/terraform-aci-autoscaling-nia)
+- Create and Update Cisco ACI Endpoint Security Groups: [Terraform Registry](https://registry.terraform.io/modules/CiscoDevNet/esg-nia/aci/latest) / [GitHub](https://github.com/CiscoDevNet/terraform-aci-esg-nia)
+
+#### Citrix ADC
+
+- Create, Update, and Delete Service Groups in Citrix ADC: [Terraform Registry](https://registry.terraform.io/modules/citrix/servicegroup-consul-sync-nia/citrixadc/latest) / [GitHub](https://github.com/citrix/terraform-citrixadc-servicegroup-consul-sync-nia)
+
+#### F5
+
+- Dynamic Load Balancing with Pool Member Updates: [Terraform Registry](https://registry.terraform.io/modules/f5devcentral/app-consul-sync-nia/bigip/latest) / [GitHub](https://github.com/f5devcentral/terraform-bigip-app-consul-sync-nia)
+
+#### NS1
+
+- Create, Delete, and Update DNS Records and Zones: [Terraform Registry](https://registry.terraform.io/modules/ns1-terraform/record-sync-nia/ns1/latest) / [GitHub](https://github.com/ns1-terraform/terraform-ns1-record-sync-nia)
+
+#### Palo Alto Networks
+
+- Dynamic Address Group (DAG) Tags: [Terraform Registry](https://registry.terraform.io/modules/PaloAltoNetworks/dag-nia/panos/latest) / [GitHub](https://github.com/PaloAltoNetworks/terraform-panos-dag-nia)
+- Address Group and Dynamic Address Group (DAG) Tags: [Terraform
+ Registry](https://registry.terraform.io/modules/PaloAltoNetworks/ag-dag-nia/panos/latest)
+ / [GitHub](https://github.com/PaloAltoNetworks/terraform-panos-ag-dag-nia)
diff --git a/website/content/docs/automate/infrastructure/run.mdx b/website/content/docs/automate/infrastructure/run.mdx
new file mode 100644
index 000000000000..531197d819aa
--- /dev/null
+++ b/website/content/docs/automate/infrastructure/run.mdx
@@ -0,0 +1,38 @@
+---
+layout: docs
+page_title: Run Consul-Terraform-Sync
+description: >-
+ Consul-Terraform-Sync requires a Terraform Provider, a Terraform Module and a running Consul Cluster outside of the `consul-terraform-sync` daemon.
+---
+
+# Run Consul-Terraform-Sync
+
+This topic describes the basic procedure for running Consul-Terraform-Sync (CTS). Verify that you have met the [basic requirements](/consul/docs/automate/infrastructure/requirements) before attempting to run CTS.
+
+1. Move the `consul-terraform-sync` binary to a location available on your `PATH`.
+
+ ```shell-session
+ $ mv ~/Downloads/consul-terraform-sync /usr/local/bin/consul-terraform-sync
+ ```
+
+2. Create the config.hcl file and configure the options for your use case. Refer to the [configuration reference](/consul/docs/reference/cts) for details about all CTS configurations.
+
+3. Run Consul-Terraform-Sync (CTS).
+
+ ```shell-session
+ $ consul-terraform-sync start -config-file config.hcl
+ ```
+
+4. Check status of tasks. Replace port number if configured in Step 2. Refer to [Consul-Terraform-Sync API](/consul/docs/reference/cts/api) for additional information.
+
+ ```shell-session
+ $ curl localhost:8558/status/tasks
+ ```
+
+## Other run modes
+
+You can [configure CTS for high availability](/consul/docs/automate/infrastructure/high-availability), which is an enterprise capability that ensures that all changes to Consul that occur during a failover transition are processed and that CTS continues to operate as expected.
+
+You can start CTS in [inspect mode](/consul/docs/nia/cli/start#modes) to review and test your configuration before applying any changes. Inspect mode allows you to verify that the changes work as expected before running them in an unsupervised daemon mode.
+
+For hands-on instructions on using inspect mode, refer to the [Consul-Terraform-Sync Run Modes and Status Inspection](/consul/tutorials/network-infrastructure-automation/consul-terraform-sync-run-and-inspect?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial.
\ No newline at end of file
diff --git a/website/content/docs/automate/infrastructure/task.mdx b/website/content/docs/automate/infrastructure/task.mdx
new file mode 100644
index 000000000000..ec58559b2231
--- /dev/null
+++ b/website/content/docs/automate/infrastructure/task.mdx
@@ -0,0 +1,272 @@
+---
+layout: docs
+page_title: Tasks
+description: >-
+ Consul-Terraform-Sync Tasks
+---
+
+# Tasks
+
+A task is the translation of dynamic service information from the Consul Catalog into network infrastructure changes downstream. Consul-Terraform-Sync (CTS) carries out automation for executing tasks using network drivers. For a Terraform driver, the scope of a task is a Terraform module.
+
+Below is an example task configuration:
+
+```hcl
+task {
+ name = "frontend-firewall-policies"
+ description = "Add firewall policy rules for frontend services"
+ providers = ["fake-firewall", "null"]
+ module = "example/firewall-policy/module"
+ version = "1.0.0"
+ condition "services" {
+ names = ["web", "image"]
+ }
+}
+```
+
+In the example task above, the "fake-firewall" and "null" providers, listed in the `providers` field, are used. These providers themselves should be configured in their own separate [`terraform_provider` blocks](/consul/docs/nia/configuration#terraform-provider). These providers are used in the Terraform module "example/firewall-policy/module", configured in the `module` field, to create, update, and destroy resources. This module may do something like use the providers to create and destroy firewall policy objects based on IP addresses. The IP addresses come from the "web" and "image" service instances configured in the `condition "services"` block. This service-level information is retrieved by CTS which watches Consul catalog for changes.
+
+See [task configuration](/consul/docs/nia/configuration#task) for more details on how to configure a task.
+
+A task can be either enabled or disabled using the [task cli](/consul/docs/reference/cli/cts/task). When enabled, tasks are executed and automated as described in sections below. However, disabled tasks do not execute when changes are detected from Consul catalog. Since disabled tasks do not execute, they also do not store [events](/consul/docs/nia/tasks#event) until re-enabled.
+
+## Task Execution
+
+An enabled task can be configured to monitor and execute on different types of conditions, such as changes to services ([services condition](/consul/docs/nia/tasks#services-condition)) or service registration and deregistration ([catalog-services condition](/consul/docs/nia/tasks#catalog-services-condition)).
+
+A task can also monitor, but not execute on, other variables that provide additional information to the task's module. For example, a task with a catalog-services condition may execute on registration changes and additionally monitor service instances for IP information.
+
+All configured monitored information, regardless if it's used for execution or not, can be passed to the task's module as module input. Below are details on the types of execution conditions that CTS supports and their module inputs.
+
+### Services Condition
+
+Tasks with the services condition monitor and execute on either changes to a list of configured services or changes to any services that match a given regex.
+
+There are two ways to configure a task with a services condition. Only one of the two options below can be configured for a single task:
+1. Configure a task's [`services`](/consul/docs/nia/configuration#services) field (deprecated) to specify the list of services to trigger the task
+1. Configure a task's `condition` block with the [services condition](/consul/docs/nia/configuration#services-condition) type to specify services to trigger the task.
+
+The services condition operates by monitoring the [Health List Nodes For Service API](/consul/api-docs/health#list-nodes-for-service) and executing the task on any change of information for services configured. These changes include one or more changes to service values, like IP address, added or removed service instance, or tags. A complete list of values that would cause a task to run are expanded below:
+
+| Attribute | Description |
+| ----------------------- | ------------------------------------------------------------------------------------------------- |
+| `id` | A unique Consul ID for this service. This is unique per Consul agent. |
+| `name` | The logical name of the service. Many service instances may share the same logical service name. |
+| `address` | IP address of the service host -- if empty, node address should be used. |
+| `port` | Port number of the service |
+| `meta` | List of user-defined metadata key/value pairs for the service |
+| `tags` | List of tags for the service |
+| `namespace` | Consul Enterprise namespace of the service instance |
+| `status` | Representative status for the service instance based on an aggregate of the list of health checks |
+| `node` | Name of the Consul node on which the service is registered |
+| `node_id` | ID of the node on which the service is registered. |
+| `node_address` | The IP address of the Consul node on which the service is registered. |
+| `node_datacenter` | Data center of the Consul node on which the service is registered. |
+| `node_tagged_addresses` | List of explicit LAN and WAN IP addresses for the agent |
+| `node_meta` | List of user-defined metadata key/value pairs for the node |
+
+Below is an example configuration for a task that will execute when a service with a name that matches the regular expression has a change.
+
+```hcl
+task {
+ name = "services_condition_task"
+ description = "execute on changes to services whose name starts with web"
+ providers = ["my-provider"]
+ module = "path/to/services-condition-module"
+
+ condition "services" {
+ regexp = "^web.*"
+ use_as_module_input = false
+ }
+}
+```
+
+The services condition can provide input for the [`services` input variable](/consul/docs/nia/terraform-modules#services-variable) that is required for each CTS module. This can be provided depending on how the services condition is configured:
+- task's `services` field (deprecated): services object is automatically passed as module input
+- task's `condition "services"` block: users can configure the `use_as_module_input` field to optionally use the condition's services object as module input
+ - Field was previously named `source_includes_var` (deprecated)
+
+### Catalog-Services Condition
+
+Tasks with a catalog-services condition monitor and execute on service registration changes for services that satisfy the condition configuration. 'Service registration changes' specifically refers to service registration and deregistration where service registration occurs on the first service instance registration, and service deregistration occurs on the last service instance registration. Tasks with a catalog-services condition may, depending on the module, additionally monitor but not execute on service instance information.
+
+The catalog-services condition operates by monitoring the [Catalog List Services API](/consul/api-docs/catalog#list-services) and executing the task when services are added or removed in the list of registered services. Note, the task does not execute on changes to the tags of the list of services. This is similar to how changes to service instance information, mentioned above, also do not execute a task.
+
+Below is an example configuration for a task that will execute when a service with a name that matches the "web.*" regular expression in datacenter "dc1" has a registration change. It additionally monitors but does not execute on service instance changes to "web-api" in datacenter "dc2".
+
+```hcl
+task {
+ name = "catalog_service_condition_task"
+ module = "path/to/catalog-services-module"
+ providers = ["my-provider"]
+
+ condition "catalog-services" {
+ datacenter = "dc1"
+ regexp = "web.*"
+ use_as_module_input = false
+ }
+
+ module_input "services" {
+ names = ["web-api"]
+ datacenter = "dc2"
+ }
+}
+```
+
+Using the condition block's `use_as_module_input` field, users can configure CTS to use the condition's object as module input for the [`catalog_services` input variable](/consul/docs/nia/terraform-modules#catalog-services-variable). Users can refer to the configured module's documentation on how to set `use_as_module_input`.
+
+See the [Catalog-Services Condition](/consul/docs/nia/configuration#catalog-services-condition) configuration section for further details and additional configuration options.
+
+### Consul KV Condition
+
+Tasks with a consul-kv condition monitor and execute on Consul KV changes for KV pairs that satisfy the condition configuration. The consul-kv condition operates by monitoring the [Consul KV API](/consul/api-docs/kv#read-key) and executing the task when a configured KV entry is created, deleted, or updated.
+
+Based on the `recurse` option, the condition either monitors a single Consul KV pair for a given path or monitors all pairs that are prefixed by that path. In the example below, because `recurse` is set to true, the `path` option is treated as a prefix. Changes to an entry with the key `my-key` and an entry with the key `my-key/another-key` would both trigger the task. If `recurse` were set to false, then only changes to `my-key` would trigger the task.
+
+```hcl
+task {
+ name = "consul_kv_condition_task"
+ description = "execute on changes to Consul KV entry"
+ module = "path/to/consul-kv-module"
+ providers = ["my-provider"]
+
+ condition "consul-kv" {
+ path = "my-key"
+ recurse = true
+ datacenter = "dc1"
+ namespace = "default"
+ use_as_module_input = true
+ }
+}
+```
+
+Using the condition block's `use_as_module_input` field, users can configure CTS to use the condition's object as module input for the [`consul_kv` input variable](/consul/docs/nia/terraform-modules#consul-kv-variable). Users can refer to the configured module's documentation on how to set `use_as_module_input`.
+
+See the [Consul-KV Condition](/consul/docs/nia/configuration#consul-kv-condition) configuration section for more details and additional configuration options.
+
+### Schedule Condition
+
+All scheduled tasks must be configured with a schedule condition. The schedule condition sets the cadence to trigger a task with a [`cron`](/consul/docs/nia/configuration#cron) configuration. The schedule condition block does not support parameters to configure module input. As a result, inputs must be configured separately. You can configure [`module_input` blocks](/consul/docs/nia/configuration#module_input) to define the module inputs.
+
+Below is an example configuration for a task that will execute every Monday, which is set by the schedule condition's [`cron`](/consul/docs/nia/configuration#cron) configuration. The module input is defined by the `module_input` block. When the task is triggered on Monday, it will retrieve the latest information on "web" and "db" from Consul and provide this to the module's input variables.
+
+```hcl
+task {
+ name = "scheduled_task"
+ description = "execute every Monday using service information from web and db"
+ module = "path/to/module"
+
+ condition "schedule" {
+ cron = "* * * * Mon"
+ }
+ module_input "services" {
+ names = ["web", "db"]
+ }
+}
+```
+
+Below are the available options for module input types and how to configure them:
+
+- [Services module input](/consul/docs/nia/terraform-modules/#services-module-input):
+ - [`task.services`](/consul/docs/nia/configuration#services) field (deprecated)
+ - [`module_input "services"`](/consul/docs/nia/configuration#services-configure-input) block
+ - Block was previously named `source_input "services"` (deprecated)
+- [Consul KV module input](/consul/docs/nia/terraform-modules/#consul-kv-module-input):
+ - [`module_input "consul-kv"`](/consul/docs/nia/configuration#consul-kv-module-input)
+ - Block was previously named `source_input "consul-kv"` (deprecated)
+
+#### Running Behavior
+
+Scheduled tasks generally run on schedule, but they can be triggered on demand when running CTS in the following ways:
+
+- [Long-running mode](/consul/docs/nia/cli#long-running-mode): At the beginning of the long-running mode, CTS first passes through a once-mode phase in which all tasks are executed once. Scheduled tasks will trigger once during this once-mode phase. This behavior also applies to tasks that are not scheduled. After once-mode has completed, scheduled tasks subsequently trigger on schedule.
+
+- [Inspect mode](/consul/docs/nia/cli#inspect-mode): When running in inspect mode, the terminal will output a plan of proposed updates that would be made if the tasks were to trigger at that moment and then exit. No changes are applied in this mode. The outputted plan for a scheduled task is also the proposed updates that would be made if the task was triggered at that moment, even if off-schedule.
+
+- [Once mode](/consul/docs/nia/cli#once-mode): During the once mode, all tasks are only triggered one time. Scheduled tasks will execute during once mode even if not on the schedule.
+
+- [Enable CLI](/consul/docs/nia/cli/task#task-enable): When a task is enabled through the CLI, any type of task, including scheduled tasks, will be triggered at that time.
+
+#### Buffer Period
+
+Because scheduled tasks trigger on a configured cadence, buffer periods are disabled for scheduled tasks. Any configured `buffer_period` at the global level or task level will only apply to dynamic tasks and not scheduled ones.
+
+#### Events
+
+[Events](#event) are stored each time a task executes. For scheduled tasks, an event will be stored each time the task triggers on schedule, regardless of whether there was a change in the Consul catalog.
+
+## Task Automation
+
+CTS will attempt to execute each enabled task once upon startup to synchronize infrastructure with the current state of Consul. The daemon will stop and exit if any error occurs while preparing the automation environment or executing a task for the first time. This helps ensure tasks have proper configuration and are executable before the daemon transitions into running tasks in full automation as service changes are discovered over time. As a result, it is not recommended to configure a task as disabled from the start. After all tasks have successfully executed once, task failures during automation will be logged and retried or attempted again after a subsequent change.
+
+Tasks are executed near-real time when service changes are detected. For services or environments that are prone to flapping, it may be useful to configure a [buffer period](/consul/docs/nia/configuration#buffer_period-1) for a task to accumulate changes before it is executed. The buffer period would reduce the number of consecutive network calls to infrastructure by batching changes for a task over a short duration of time.
+
+## Status Information
+
+Status-related information is collected and offered via [status API](/consul/docs/nia/api#status) to provide visibility into what and how the tasks are running. Information is offered in three-levels (lowest to highest):
+
+- Event data
+- Task status
+- Overall status
+
+These three levels form a hierarchy where each level of data informs the one higher. The lowest-level, event data, is collected each time a task runs to update network infrastructure. This event data is then aggregated to inform individual task statuses. The count distribution of all the task statuses inform the overall status's task summary.
+
+### Event
+
+When a task is triggered, CTS takes a series of steps in order to update the network infrastructure. These steps consist of fetching the latest data from Consul for the task's module inputs and then updating the network infrastructure accordingly. An event captures information across this process. It stores information to help understand if the update to network infrastructure was successful or not and any errors that may have occurred.
+
+A dynamic task will store an event when it is triggered by a change in Consul. A scheduled task will store an event when it is triggered on schedule, regardless of whether there is a change in Consul. A disabled task does not update network infrastructure, so it will not store events until re-enabled.
+
+Sample event:
+
+```json
+{
+ "id": "ef202675-502f-431f-b133-ed64d15b0e0e",
+ "success": false,
+ "start_time": "2020-11-24T12:05:18.651231-05:00",
+ "end_time": "2020-11-24T12:05:20.900115-05:00",
+ "task_name": "task_b",
+ "error": {
+ "message": "example error: error while doing terraform-apply"
+ },
+ ...
+}
+```
+
+For complete information on the event structure, see [events in our API documentation](/consul/docs/nia/api#event). Event information can be retrieved by using the [`include=events` parameter](/consul/docs/nia/api#include) with the [task status API](/consul/docs/nia/api#task-status).
+
+### Task Status
+
+Each time a task runs to update network infrastructure, event data is stored for that run. The five most recent events are stored for each task, and these stored events are used to determine task status. For example, if the most recent stored event is not successful but the others are, then the task's health status is "errored".
+
+Sample task status:
+
+```json
+{
+ "task_name": "task_b",
+ "status": "errored",
+ "providers": ["null"],
+ "services": ["web"],
+ "events_url": "/v1/status/tasks/task_b?include=events"
+}
+```
+
+Task status information can be retrieved with [task status API](/consul/docs/nia/api#task-status). The API documentation includes details on what health statuses are available and how it is calculated based on events' success/failure information.
+
+### Overall Status
+
+Overall status returns a summary of the health statuses across all tasks. The summary is the count of tasks in each health status category.
+
+Sample overall status:
+
+```json
+{
+ "task_summary": {
+ "successful": 28,
+ "errored": 5,
+ "critical": 1
+ }
+}
+```
+
+Overall status information can be retrieved with [overall status API](/consul/docs/nia/api#overall-status). The API documentation includes details on what health statuses are available and how it is calculated based on task statuses' health status information.
\ No newline at end of file
diff --git a/website/content/docs/automate/kv/index.mdx b/website/content/docs/automate/kv/index.mdx
new file mode 100644
index 000000000000..6568e414ce79
--- /dev/null
+++ b/website/content/docs/automate/kv/index.mdx
@@ -0,0 +1,111 @@
+---
+layout: docs
+page_title: Consul key/value (KV) store overview
+description: >-
+ Consul includes a KV store for indexed objects, configuration parameters, and metadata that you can use to dynamically configure apps. Learn about accessing and using the KV store to extend Consul's functionality through watches, sessions, and Consul Template.
+---
+
+# Consul key/value (KV) store overview
+
+Consul KV is a core feature of Consul and is installed with the Consul agent.
+Once installed with the agent, Consul KV has reasonable defaults. Consul KV
+lets you store indexed objects, though its main uses are storing
+configuration parameters and metadata. It is a basic KV store and is not
+intended to be a full featured datastore (such as DynamoDB).
+
+The Consul KV datastore is located on the servers, but any client or server
+agent may access it. The natively integrated [RPC
+functionality](/consul/docs/architecture/control-plane) lets clients
+forward requests to servers, including key/value reads and writes. Part of
+Consul's core design allows automatic data replication across all the
+servers. Having a quorum of servers decreases the risk of data loss if an
+outage occurs.
+
+If you have not used Consul KV, complete this [Getting Started
+tutorial](/consul/tutorials/interactive/get-started-key-value-store?utm_source=docs)
+on HashiCorp.
+
+
+The Consul KV API, CLI, and UI are now considered feature complete. No new feature development is planned for future releases.
+
+
+## Accessing the KV store
+
+Access the KV store with the [consul kv CLI subcommands](/consul/commands/kv),
+[HTTP API](/consul/api-docs/kv) and Consul UI. To restrict access, enable and
+configure [ACLs](/consul/docs/secure/acl). Once the ACL system has been
+bootstrapped, users and services need a valid token with KV
+[privileges](/consul/docs/secure/acl/rule#key-value-rules) to access the data
+store. This includes read-only access. We recommend creating a token with
+limited privileges. For example, you could create a token with write privileges
+on one key for developers to update the value related to their application.
+
+The datastore itself is located on the Consul servers in the [data directory](/consul/docs/architecture/backend). To ensure data is not lost in the event of a complete outage, use the [`consul snapshot`](/consul/commands/snapshot/restore) feature to backup the data.
+
+## Using Consul KV
+
+Objects are opaque to Consul, meaning there are no restrictions on the type of
+object stored in a key/value entry. The main restriction on an object is a
+maximum size of 512 KB. Due to the maximum object size and main use cases, you should
+not need extra storage. The general [sizing
+recommendations](/consul/docs/reference/agent#kv_max_value_size) are usually
+sufficient.
+
+Keys, like objects, are not restricted by type and can include any character.
+However, we recommend using URL-safe chars such as `[a-zA-Z0-9-._~]` with the
+exception of `/`, which can be used to help organize data. Note, `/` is
+treated like any other character and is not fixed to the file system. This means
+that including `/` in a key does not fix it to a directory structure. This model is
+similar to Amazon S3 buckets. However, `/` is still useful for organizing data
+and when recursively searching within the data store. We also recommend that you
+avoid the use of `*`, `?`, `'`, and `%` because they can cause issues when using
+the API and in shell scripts.
+
+## Using Sentinel to apply policies for Consul KV
+
+This feature requires HashiCorp Cloud Platform (HCP) or self-managed Consul Enterprise.
+
+You can also use Sentinel as a Policy-as-code framework for defining advanced key-value storage access control policies. Sentinel policies extend the ACL system in Consul beyond static "read", "write", and "deny" policies to support full conditional logic and integration with external systems. Reference the [Sentinel documentation](https://docs.hashicorp.com/sentinel/concepts) for high-level Sentinel concepts.
+
+To get started with Sentinel in Consul, refer to the [Sentinel documentation](https://docs.hashicorp.com/sentinel/consul) or [Consul documentation](/consul/docs/secure/acl/sentinel).
+
+## Extending Consul KV
+
+### Consul Template
+
+If you plan to use Consul KV as part of your configuration management process, review the [Consul Template](/consul/tutorials/developer-configuration/consul-template?utm_source=docs) tutorial on how to update configuration based on value updates in the KV. Consul Template is based on Go Templates and allows for a series of scripted actions to be initiated on value changes to a Consul key.
+
+### Watches
+
+Extend Consul KV with the use of Consul watches,
+which are a way to monitor data for updates. When an update is detected, an
+external handler is invoked. To use watches with the KV store, use the
+`key` watch type.
+
+Refer to the [Consul watches documentation](/consul/docs/automate/watch) for more information.
+
+### Consul Sessions
+
+Use Consul sessions to build distributed locks with Consul KV. Sessions act as a
+binding layer between nodes, health checks, and key/value data. The KV API
+supports an `acquire` and `release` operation. The `acquire` operation acts like
+a Check-And-Set operation. On success, Consul updates the key, increments the
+`LockIndex` and then updates the session value to reflect the session holding
+the lock. Refer to the [K/V integration
+documentation](/consul/docs/automate/session#k-v-integration) for more
+information.
+
+Refer to the following tutorials to learn how to use Consul sessions:
+
+- [Application leader
+ election](/consul/tutorials/developer-configuration/application-leader-elections)
+ to learn the process for building client-side leader elections for service
+ instances using Consul's session mechanism and the Consul key/value store.
+- [Sessions and distributed locks
+ overview](/consul/tutorials/developer-configuration/distributed-semaphore) to build distributed semaphores
+
+### Vault
+
+If you plan to use Consul KV as a backend for Vault, refer to the [Configure
+Vault cluster with Integrated Storage
+tutorial](/vault/tutorials/day-one-consul/ha-with-consul?utm_source=docs).
diff --git a/website/content/docs/dynamic-app-config/kv/store.mdx b/website/content/docs/automate/kv/store.mdx
similarity index 100%
rename from website/content/docs/dynamic-app-config/kv/store.mdx
rename to website/content/docs/automate/kv/store.mdx
diff --git a/website/content/docs/automate/native/go.mdx b/website/content/docs/automate/native/go.mdx
new file mode 100644
index 000000000000..a4d6ff1275f0
--- /dev/null
+++ b/website/content/docs/automate/native/go.mdx
@@ -0,0 +1,253 @@
+---
+layout: docs
+page_title: Service Mesh Native App Integration - Go Apps
+description: >-
+ Consul's service mesh supports native integrations of Go applications into the service mesh through a Go library. Example code demonstrates how to connect your Go applications to the service mesh.
+---
+
+# Service Mesh Native Integration for Go Applications
+
+
+
+The Connect Native golang SDK is currently deprecated and will be removed in a future Consul release.
+The SDK will be removed when the long term replacement to native application integration (such as a proxyless gRPC service mesh integration) is delivered. Refer to [GH-10339](https://github.com/hashicorp/consul/issues/10339) for additional information and to track progress toward one potential solution that is tracked as replacement functionality.
+
+
+
+We provide a library that makes it drop-in simple to integrate Consul service mesh
+with most [Go](https://golang.org/) applications. This page shows examples
+of integrating this library for accepting or establishing mesh-based
+connections. For most Go applications, Consul service mesh can be natively integrated
+in just a single line of code excluding imports and struct initialization.
+
+In addition to this, please read and understand the
+[overview of service mesh native integrations](/consul/docs/automate/native).
+In particular, after natively integrating applications with Consul service mesh,
+they must declare that they accept mesh-based connections via their service definitions.
+
+The noun _connect_ is used throughout this documentation and the Go API
+to refer to the connect subsystem that provides Consul's service mesh capabilities.
+
+## Accepting Connections
+
+-> **Note:** When calling `ConnectAuthorize()` on incoming connections this library
+will return _deny_ if `Permissions` are defined on the matching intention.
+The method is currently only suited for networking layer 4 (e.g. TCP) integration.
+
+Any server that supports TLS (HTTP, gRPC, net/rpc, etc.) can begin
+accepting mesh-based connections in just a few lines of code. For most
+existing applications, converting the server to accept mesh-based
+connections will require only a one-line change excluding imports and
+structure initialization.
+
+The
+Go library exposes a `*tls.Config` that _automatically_ communicates with
+Consul to load certificates and authorize inbound connections during the
+TLS handshake. This also automatically starts goroutines to update any
+changing certs.
+
+Example, followed by more details:
+
+```go
+import(
+ "net/http"
+
+ "github.com/hashicorp/consul/api"
+ "github.com/hashicorp/consul/connect"
+)
+
+func main() {
+ // Create a Consul API client
+ client, _ := api.NewClient(api.DefaultConfig())
+
+ // Create an instance representing this service. "my-service" is the
+ // name of _this_ service. The service should be cleaned up via Close.
+ svc, _ := connect.NewService("my-service", client)
+ defer svc.Close()
+
+ // Creating an HTTP server that serves via service mesh
+ server := &http.Server{
+ Addr: ":8080",
+ TLSConfig: svc.ServerTLSConfig(),
+ // ... other standard fields
+ }
+
+ // Serve!
+ server.ListenAndServeTLS("", "")
+}
+```
+
+The first step is to create a Consul API client. This is almost always the
+default configuration with an ACL token set, since you want to communicate
+to the local agent. The default configuration will also read the ACL token
+from environment variables if set. The Go library will use this client to request certificates,
+authorize connections, and more.
+
+Next, `connect.NewService` is called to create a service structure representing
+the _currently running service_. This structure maintains all the state
+for accepting and establishing connections. An application should generally
+create one service and reuse that one service for all servers and clients.
+
+Finally, a standard `*http.Server` is created. The magic line is the `TLSConfig`
+value. This is set to a TLS configuration returned by the service structure.
+This TLS configuration is configured to automatically load certificates
+in the background, cache them, and authorize inbound connections. The service
+structure automatically handles maintaining blocking queries to update certificates
+in the background if they change.
+
+Since the service returns a standard `*tls.Config`, _any_ server that supports
+TLS can be configured. This includes gRPC, net/rpc, basic TCP, and more.
+Another example is shown below with just a plain TLS listener:
+
+```go
+import(
+ "crypto/tls"
+
+ "github.com/hashicorp/consul/api"
+ "github.com/hashicorp/consul/connect"
+)
+
+func main() {
+ // Create a Consul API client
+ client, _ := api.NewClient(api.DefaultConfig())
+
+ // Create an instance representing this service. "my-service" is the
+ // name of _this_ service. The service should be cleaned up via Close.
+ svc, _ := connect.NewService("my-service", client)
+ defer svc.Close()
+
+ // Creating an HTTP server that serves via service mesh
+ listener, _ := tls.Listen("tcp", ":8080", svc.ServerTLSConfig())
+ defer listener.Close()
+
+ // Accept
+ go acceptLoop(listener)
+}
+```
+
+## HTTP Clients
+
+For Go applications that need to connect to HTTP-based upstream dependencies,
+the Go library can construct an `*http.Client` that automatically establishes
+mesh-based connections as long as Consul-based service discovery is used.
+
+Example, followed by more details:
+
+```go
+import(
+ "github.com/hashicorp/consul/api"
+ "github.com/hashicorp/consul/connect"
+)
+
+func main() {
+ // Create a Consul API client
+ client, _ := api.NewClient(api.DefaultConfig())
+
+ // Create an instance representing this service. "my-service" is the
+ // name of _this_ service. The service should be cleaned up via Close.
+ svc, _ := connect.NewService("my-service", client)
+ defer svc.Close()
+
+ // Get an HTTP client
+ httpClient := svc.HTTPClient()
+
+ // Perform a request, then use the standard response
+ resp, _ := httpClient.Get("https://userinfo.service.consul/user/mitchellh")
+}
+```
+
+The first step is to create a Consul API client and service. These are the
+same steps as accepting connections and are explained in detail in the
+section above. If your application is both a client and server, both the
+API client and service structure can be shared and reused.
+
+Next, we call `svc.HTTPClient()` to return a specially configured
+`*http.Client`. This client will automatically establish mesh-based
+connections using Consul service discovery.
+
+Finally, we perform an HTTP `GET` request to a hypothetical userinfo service.
+The HTTP client configuration automatically sends the correct client
+certificate, verifies the server certificate, and manages background
+goroutines for updating our certificates as necessary.
+
+If the application already uses a manually constructed `*http.Client`,
+the `svc.HTTPDialTLS` function can be used to configure the
+`http.Transport.DialTLS` field to achieve equivalent behavior.
+
+### Hostname Requirements
+
+The hostname used in the request URL is used to identify the logical service
+discovery mechanism for the target. **It's not actually resolved via DNS** but
+used as a logical identifier for a Consul service discovery mechanism. It has
+the following specific limitations:
+
+- The scheme must be `https://`.
+- It must be a Consul DNS name in one of the following forms:
+  - `<service>.service[.<datacenter>].consul` to discover a healthy service
+    instance for a given service.
+  - `<query name>.query[.<datacenter>].consul` to discover an instance via
+    [Prepared Query](/consul/api-docs/query).
+- The top-level domain _must_ be `.consul` even if your cluster has a custom
+ `domain` configured for its DNS interface. This might be relaxed in the
+ future.
+- Tag filters for services are not currently supported (i.e.
+ `tag1.web.service.consul`) however the same behavior can be achieved using a
+ prepared query.
+- External DNS names, raw IP addresses and so on will cause an error and should
+ be fetched using a separate `HTTPClient`.
+
+## Raw TLS Connection
+
+For a raw `net.Conn` TLS connection, the `svc.Dial` function can be used.
+This will establish a connection to the desired service via the service mesh and
+return the `net.Conn`. This connection can then be used as desired.
+
+Example:
+
+```go
+import (
+ "context"
+
+ "github.com/hashicorp/consul/api"
+ "github.com/hashicorp/consul/connect"
+)
+
+func main() {
+ // Create a Consul API client
+ client, _ := api.NewClient(api.DefaultConfig())
+
+ // Create an instance representing this service. "my-service" is the
+ // name of _this_ service. The service should be cleaned up via Close.
+ svc, _ := connect.NewService("my-service", client)
+ defer svc.Close()
+
+ // Connect to the "userinfo" Consul service.
+ conn, _ := svc.Dial(context.Background(), &connect.ConsulResolver{
+ Client: client,
+ Name: "userinfo",
+ })
+}
+```
+
+This uses a familiar `Dial`-like function to establish raw `net.Conn` values.
+The second parameter to dial is an implementation of the `connect.Resolver`
+interface. The example above uses the `*connect.ConsulResolver` implementation
+to perform Consul-based service discovery. This also automatically determines
+the correct certificate metadata we expect the remote service to serve.
+
+## Static Addresses, Custom Resolvers
+
+In the raw TLS connection example, you see the use of a `connect.Resolver`
+implementation. This interface can be implemented to perform address
+resolution. This must return the address and also the URI SAN expected
+in the TLS certificate served by the remote service.
+
+The Go library provides two built-in resolvers:
+
+- `*connect.StaticResolver` can be used for static addresses where no
+ service discovery is required. The expected cert URI SAN must be
+ manually specified.
+
+- `*connect.ConsulResolver` which resolves services and prepared queries
+ via the Consul API. This also automatically determines the expected
+ cert URI SAN.
diff --git a/website/content/docs/automate/native/index.mdx b/website/content/docs/automate/native/index.mdx
new file mode 100644
index 000000000000..1b0b3f15fd34
--- /dev/null
+++ b/website/content/docs/automate/native/index.mdx
@@ -0,0 +1,165 @@
+---
+layout: docs
+page_title: Service Mesh Native App Integration - Overview
+description: >-
+ When using sidecar proxies is not possible, applications can natively integrate with Consul service mesh, but have reduced access to service mesh features. Learn how "mesh-native" or "connect-native" apps use mTLS to authenticate with Consul and how to add integrations to service registrations.
+---
+
+# Service Mesh Native App Integration Overview
+
+
+
+The Connect Native Golang SDK and `v1/agent/connect/authorize`, `v1/agent/connect/ca/leaf`,
+and `v1/agent/connect/ca/roots` APIs are deprecated and will be removed in a future release. Although Connect Native
+will still operate as designed, we do not recommend leveraging this feature because it is deprecated and will be
+removed when the long term replacement to native application integration (such as a proxyless gRPC service mesh integration) is delivered. Refer to [GH-10339](https://github.com/hashicorp/consul/issues/10339) for additional information and to track progress toward one potential solution that is tracked as replacement functionality.
+
+The Native App Integration does not support many of the Consul's service mesh features, and is not under active development.
+The [Envoy proxy](/consul/docs/reference/proxy/envoy) should be used for most production environments.
+
+
+
+Applications can natively integrate with Consul's service mesh API to support accepting
+and establishing connections to other mesh services without the overhead of a
+[proxy sidecar](/consul/docs/connect/proxy). This option is especially useful
+for applications that may be experiencing performance issues with the proxy
+sidecar deployment. This page will cover the high-level overview of
+integration, registering the service, etc. For language-specific examples, see
+the sidebar navigation to the left. It is also required if your service
+relies on a dynamic set of upstream services.
+
+Service mesh traffic is just basic mutual TLS. This means that almost any application
+can easily integrate with Consul service mesh. There is no custom protocol in use;
+any language that supports TLS can accept and establish mesh-based
+connections.
+
+We currently provide an easy-to-use [Go integration](/consul/docs/automate/native/go)
+to assist with getting the proper certificates, verifying connections,
+etc. We plan to add helper libraries for other languages in the future.
+However, without library support, it is still possible for any major language
+to integrate with Consul service mesh.
+
+The noun _connect_ is used throughout this documentation to refer to the connect
+subsystem that provides Consul's service mesh capabilities.
+
+## Overview
+
+The primary work involved in natively integrating with service mesh is
+[acquiring the proper TLS certificate](/consul/api-docs/agent/connect#service-leaf-certificate),
+[verifying TLS certificates](/consul/api-docs/agent/connect#certificate-authority-ca-roots),
+and [authorizing inbound connections or requests](/consul/api-docs/connect/intentions#list-matching-intentions).
+
+All of this is done using the Consul HTTP APIs linked above.
+
+An overview of the sequence is shown below. The diagram and the following
+details may seem complex, but this is a _regular mutual TLS connection_ with
+an API call to verify the incoming client certificate.
+
+
+
+-> **Note:** This diagram depicts the simpler networking layer 4 (e.g. TCP) [integration
+mechanism](/consul/api-docs/agent/connect#authorize).
+
+Details on the steps are below:
+
+- **Service discovery** - This is normal service discovery using Consul,
+  a static IP, or any other mechanism. If you're using Consul DNS, use the
+ [`.connect`](/consul/docs/services/discovery/dns-static-lookups#service-mesh-enabled-service-lookups)
+ syntax to find mesh-capable endpoints for a service. After service
+ discovery, choose one address from the list of **service addresses**.
+
+- **Mutual TLS** - As a client, connect to the discovered service address
+ over normal TLS. As part of the TLS connection, provide the
+ [service certificate](/consul/api-docs/agent/connect#service-leaf-certificate)
+ as the client certificate. Verify the remote certificate against the
+ [public CA roots](/consul/api-docs/agent/connect#certificate-authority-ca-roots).
+ As a client, if the connection is established then you've established
+ a mesh-based connection and there are no further steps!
+
+- **Authorization** - As a server accepting connections, verify the client
+ certificate against the [public CA
+ roots](/consul/api-docs/agent/connect#certificate-authority-ca-roots). After verifying
+ the certificate, parse some basic fields from it and use those to determine
+ if the connection should be allowed. How this is done is dependent on
+ the level of integration desired:
+
+ - **Simple integration (TCP-only)** - Call the [authorizing
+ API](/consul/api-docs/agent/connect#authorize) against the local agent. If this returns
+ successfully, complete the TLS handshake and establish the connection. If
+ authorization fails, close the connection.
+
+ -> **NOTE:** This API call is expected to be called in the connection path,
+    so if the local Consul agent is down or unresponsive it will affect the
+ success rate of new connections. The agent uses locally cached data to
+ authorize the connection and typically responds in microseconds. Therefore,
+ the impact to the TLS handshake is typically microseconds.
+
+ - **Complete integration** - Like how the calls to acquire the leaf
+ certificate and CA roots are expected to be done out of band and reused, so
+ should the [intention match
+ API](/consul/api-docs/connect/intentions#list-matching-intentions). With all of the
+ relevant intentions cached for the destination, all enforcement operations
+ can be done entirely by the service without calling any Consul APIs in the
+ connection or request path. If the service is networking layer 7 (e.g.
+ HTTP) aware it can safely enforce intentions per _request_ instead of the
+ coarser per _connection_ model.
+
+## Update certificates and certificate roots
+
+The leaf certificate and CA roots can be updated at any time and the
+natively integrated application must react to this relatively quickly
+so that new connections are not disrupted. This can be done through
+Consul blocking queries (HTTP long polling) or through periodic polling.
+
+The API calls for
+[acquiring a service mesh TLS certificate](/consul/api-docs/agent/connect#service-leaf-certificate)
+and [reading service mesh CA roots](/consul/api-docs/agent/connect#certificate-authority-ca-roots)
+both support
+[blocking queries](/consul/api-docs/features/blocking). By using blocking
+queries, an application can efficiently wait for an updated value. For example,
+the leaf certificate API will block until the certificate is near expiration
+or the signing certificates have changed and will issue and return a new
+certificate.
+
+In some languages, using blocking queries may not be simple. In that case,
+we still recommend using the blocking query parameters but with a very short
+`timeout` value set. Doing this is documented with
+[blocking queries](/consul/api-docs/features/blocking). The low timeout will
+ensure the API responds quickly. We recommend that applications poll the
+certificate endpoints frequently, such as multiple times per minute.
+
+The overhead for the blocking queries (long or periodic polling) is minimal.
+The API calls are to the local agent and the local agent uses locally
+cached data multiplexed over a single TCP connection to the Consul leader.
+Even if a single machine has 1,000 mesh-enabled services all blocking
+on certificate updates, this translates to only one TCP connection to the
+Consul server.
+
+Some language libraries such as the
+[Go library](/consul/docs/automate/native/go) automatically handle updating
+and locally caching the certificates.
+
+## Service registration
+
+Mesh-native applications must tell Consul that they support service mesh
+natively. This enables the service to be returned as part of service
+discovery for service mesh-capable services used by other mesh-native applications
+and client [proxies](/consul/docs/connect/proxy).
+
+You can enable native service mesh support directly in the [service definition](/consul/docs/reference/service#connect) by configuring the `connect` block. In the following example, the `redis` service is configured to support service mesh natively:
+
+```json
+{
+ "service": {
+ "name": "redis",
+ "port": 8000,
+ "connect": {
+ "native": true
+ }
+ }
+}
+```
+
+Services that support service mesh natively are still returned through the standard
+service discovery mechanisms in addition to the mesh-only service discovery
+mechanisms.
diff --git a/website/content/docs/automate/session.mdx b/website/content/docs/automate/session.mdx
new file mode 100644
index 000000000000..eca6ff487e78
--- /dev/null
+++ b/website/content/docs/automate/session.mdx
@@ -0,0 +1,144 @@
+---
+layout: docs
+page_title: Sessions and distributed locks overview
+description: >-
+ Consul supports sessions that you can use to build distributed locks with granular locking. Learn about sessions, how they can prevent "split-brain" systems by ensuring consistency in deployments, and how they can integrate with the key/value (KV) store.
+---
+
+# Sessions and distributed locks overview
+
+Consul provides a session mechanism which can be used to build distributed locks.
+Sessions act as a binding layer between nodes, health checks, and key/value data.
+They are designed to provide granular locking and are heavily inspired by
+[The Chubby Lock Service for Loosely-Coupled Distributed Systems](https://research.google/pubs/the-chubby-lock-service-for-loosely-coupled-distributed-systems/).
+
+## Session Design
+
+A session in Consul represents a contract that has very specific semantics.
+When a session is constructed, a node name, a list of health checks, a behavior,
+a TTL, and a `lock-delay` may be provided. The newly constructed session is provided with
+a named ID that can be used to identify it. This ID can be used with the KV
+store to acquire locks: advisory mechanisms for mutual exclusion.
+
+Below is a diagram showing the relationship between these components:
+
+
+
+The contract that Consul provides is that under any of the following
+situations, the session will be _invalidated_:
+
+- Node is deregistered
+- Any of the health checks are deregistered
+- Any of the health checks go to the critical state
+- Session is explicitly destroyed
+- TTL expires, if applicable
+
+When a session is invalidated, it is destroyed and can no longer
+be used. What happens to the associated locks depends on the
+behavior specified at creation time. Consul supports a `release`
+and `delete` behavior. The `release` behavior is the default
+if none is specified.
+
+If the `release` behavior is being used, any of the locks held in
+association with the session are released, and the `ModifyIndex` of
+the key is incremented. Alternatively, if the `delete` behavior is
+used, the key corresponding to any of the held locks is simply deleted.
+This can be used to create ephemeral entries that are automatically
+deleted by Consul.
+
+While this is a simple design, it enables a multitude of usage
+patterns. By default, the
+[gossip based failure detector](/consul/docs/concept/gossip)
+is used as the associated health check. This failure detector allows
+Consul to detect when a node that is holding a lock has failed and
+to automatically release the lock. This ability provides **liveness** to
+Consul locks; that is, under failure the system can continue to make
+progress. However, because there is no perfect failure detector, it's possible
+to have a false positive (failure detected) which causes the lock to
+be released even though the lock owner is still alive. This means
+we are sacrificing some **safety**.
+
+Conversely, it is possible to create a session with no associated
+health checks. This removes the possibility of a false positive
+and trades liveness for safety. You can be absolutely certain Consul
+will not release the lock even if the existing owner has failed.
+Since Consul APIs allow a session to be force destroyed, this allows
+systems to be built that require an operator to intervene in the
+case of a failure while precluding the possibility of a split-brain.
+
+A third health checking mechanism is session TTLs. When creating
+a session, a TTL can be specified. If the TTL interval expires without
+being renewed, the session has expired and an invalidation is triggered.
+This type of failure detector is also known as a heartbeat failure detector.
+It is less scalable than the gossip based failure detector as it places
+an increased burden on the servers but may be applicable in some cases.
+The contract of a TTL is that it represents a lower bound for invalidation;
+that is, Consul will not expire the session before the TTL is reached, but it
+is allowed to delay the expiration past the TTL. The TTL is renewed on
+session creation, on session renew, and on leader failover. When a TTL
+is being used, clients should be aware of clock skew issues: namely,
+time may not progress at the same rate on the client as on the Consul servers.
+It is best to set conservative TTL values and to renew in advance of the TTL
+to account for network delay and time skew.
+
+The final nuance is that sessions may provide a `lock-delay`. This
+is a time duration, between 0 and 60 seconds. When a session invalidation
+takes place, Consul prevents any of the previously held locks from
+being re-acquired for the `lock-delay` interval; this is a safeguard
+inspired by Google's Chubby. The purpose of this delay is to allow
+the potentially still live leader to detect the invalidation and stop
+processing requests that may lead to inconsistent state. While not a
+bulletproof method, it does avoid the need to introduce sleep states
+into application logic and can help mitigate many issues. While the
+default is to use a 15 second delay, clients are able to disable this
+mechanism by providing a zero delay value.
+
+## K/V Integration
+
+Integration between the KV store and sessions is the primary
+place where sessions are used. A session must be created prior to use
+and is then referred to by its ID.
+
+The KV API is extended to support an `acquire` and `release` operation.
+The `acquire` operation acts like a Check-And-Set operation except it
+can only succeed if there is no existing lock holder (the current lock holder
+can re-`acquire`, see below). On success, there is a normal key update, but
+there is also an increment to the `LockIndex`, and the `Session` value is
+updated to reflect the session holding the lock.
+
+If the lock is already held by the given session during an `acquire`, then
+the `LockIndex` is not incremented but the key contents are updated. This
+lets the current lock holder update the key contents without having to give
+up the lock and reacquire it.
+
+Once held, the lock can be released using a corresponding `release` operation,
+providing the same session. Again, this acts like a Check-And-Set operation
+since the request will fail if given an invalid session. A critical note is
+that the lock can be released without being the creator of the session.
+This is by design as it allows operators to intervene and force-terminate
+a session if necessary. As mentioned above, a session invalidation will also
+cause all held locks to be released or deleted. When a lock is released, the `LockIndex`
+does not change; however, the `Session` is cleared and the `ModifyIndex` increments.
+
+These semantics (heavily borrowed from Chubby) allow the tuple of (Key, LockIndex, Session)
+to act as a unique "sequencer". This `sequencer` can be passed around and used
+to verify if the request belongs to the current lock holder. Because the `LockIndex`
+is incremented on each `acquire`, even if the same session re-acquires a lock,
+the `sequencer` will be able to detect a stale request. Similarly, if a session is
+invalidated, the Session corresponding to the given `LockIndex` will be blank.
+
+To be clear, this locking system is purely _advisory_. There is no enforcement
+that clients must acquire a lock to perform any operation. Any client can
+read, write, and delete a key without owning the corresponding lock. It is not
+the goal of Consul to protect against misbehaving clients.
+
+## Leader Election
+
+You can use the primitives provided by sessions and the locking mechanisms of the KV
+store to build client-side leader election algorithms.
+These are covered in more detail in the [Leader Election guide](/consul/docs/automate/application-leader-election).
+
+## Prepared Query Integration
+
+Prepared queries may be attached to a session in order to automatically delete
+the prepared query when the session is invalidated.
\ No newline at end of file
diff --git a/website/content/docs/automate/watch.mdx b/website/content/docs/automate/watch.mdx
new file mode 100644
index 000000000000..83b3574e5e88
--- /dev/null
+++ b/website/content/docs/automate/watch.mdx
@@ -0,0 +1,693 @@
+---
+layout: docs
+page_title: Watches overview
+description: >-
+ Watches monitor the key/value (KV) store, services, nodes, health checks, and events for updates. When a watch detects a change, it invokes a handler that can call an HTTP endpoint or runs an executable. Learn how to configure watches to dynamically respond to changes in Consul.
+---
+
+# Watches overview
+
+Watches are a way of specifying a view of data (e.g. list of nodes, KV pairs, health
+checks) which is monitored for updates. When an update is detected, an external handler
+is invoked. A handler can be any executable or HTTP endpoint. As an example, you could watch the status
+of health checks and notify an external system when a check is critical.
+
+Watches are implemented using blocking queries in the [HTTP API](/consul/api-docs).
+Agents automatically make the proper API calls to watch for changes
+and inform a handler when the data view has updated.
+
+Watches can be configured as part of the [agent's configuration](/consul/docs/reference/agent/configuration-file/general#watches),
+causing them to run once the agent is initialized. Reloading the agent configuration
+allows for adding or removing watches dynamically.
+
+Alternatively, the [watch command](/consul/commands/watch) enables a watch to be
+started outside of the agent. This can be used by an operator to inspect data in Consul
+or to easily pipe data into processes without being tied to the agent lifecycle.
+
+In either case, the `type` of the watch must be specified. Each type of watch
+supports different parameters, some required and some optional. These options are specified
+in a JSON body when using agent configuration or as CLI flags for the watch command.
+
+## Handlers
+
+The watch configuration specifies the view of data to be monitored.
+Once that view is updated, the specified handler is invoked. Handlers can be either an
+executable or an HTTP endpoint. A handler receives JSON formatted data
+with invocation info, following a format that depends on the type of the watch.
+Each watch type documents the format type. Because they map directly to an HTTP
+API, handlers should expect the input to match the format of the API. A Consul
+index is also given, corresponding to the responses from the
+[HTTP API](/consul/api-docs).
+
+### Executable
+
+An executable handler reads the JSON invocation info from stdin. Additionally,
+the `CONSUL_INDEX` environment variable will be set to the Consul index.
+Anything written to stdout is logged.
+
+Here is an example configuration, where `handler_type` is optionally set to
+`script`:
+
+
+
+
+```hcl
+watches = [
+ {
+ type = "key"
+ key = "foo/bar/baz"
+ handler_type = "script"
+ args = ["/usr/bin/my-service-handler.sh", "-redis"]
+ }
+]
+```
+
+
+
+
+
+```json
+{
+ "watches": [
+ {
+ "type": "key",
+ "key": "foo/bar/baz",
+ "handler_type": "script",
+ "args": ["/usr/bin/my-service-handler.sh", "-redis"]
+ }
+ ]
+}
+```
+
+
+
+
+
+Prior to Consul 1.0, watches used a single `handler` field to define the command to run, and
+would always run in a shell. In Consul 1.0, the `args` array was added so that handlers can be
+run without a shell. The `handler` field is deprecated, and you should include the shell in
+the `args` to run under a shell, eg. `"args": ["sh", "-c", "..."]`.
+
+### HTTP endpoint
+
+An HTTP handler sends an HTTP request when a watch is invoked. The JSON invocation info is sent
+as a payload along the request. The response also contains the Consul index as a header named
+`X-Consul-Index`.
+
+The HTTP handler can be configured by setting `handler_type` to `http`. Additional handler options
+are set using `http_handler_config`. The only required parameter is the `path` field which specifies
+the URL to the HTTP endpoint. Consul uses `POST` as the default HTTP method, but this is also configurable.
+Other optional fields are `header`, `timeout` and `tls_skip_verify`. The watch invocation data is
+always sent as a JSON payload.
+
+Here is an example configuration:
+
+
+
+
+```hcl
+watches = [
+ {
+ type = "key"
+ key = "foo/bar/baz"
+ handler_type = "http"
+ http_handler_config {
+ path = "https://localhost:8000/watch"
+ method = "POST"
+ header = {
+ x-foo = ["bar", "baz"]
+ }
+ timeout = "10s"
+ tls_skip_verify = false
+ }
+ }
+]
+```
+
+
+
+
+```json
+{
+ "watches": [
+ {
+ "type": "key",
+ "key": "foo/bar/baz",
+ "handler_type": "http",
+ "http_handler_config": {
+ "path": "https://localhost:8000/watch",
+ "method": "POST",
+ "header": { "x-foo": ["bar", "baz"] },
+ "timeout": "10s",
+ "tls_skip_verify": false
+ }
+ }
+ ]
+}
+```
+
+
+
+
+## Global Parameters
+
+In addition to the parameters supported by each option type, there
+are a few global parameters that all watches support:
+
+- `datacenter` - Can be provided to override the agent's default datacenter.
+- `token` - Can be provided to override the agent's default ACL token.
+- `args` - The handler subprocess and arguments to invoke when the data view updates.
+- `handler` - The handler shell command to invoke when the data view updates.
+
+## Watch Types
+
+The following types are supported. Detailed documentation on each is below:
+
+- [`key`](#key) - Watch a specific KV pair
+- [`keyprefix`](#keyprefix) - Watch a prefix in the KV store
+- [`services`](#services) - Watch the list of available services
+- [`nodes`](#nodes) - Watch the list of nodes
+- [`service`](#service)- Watch the instances of a service
+- [`checks`](#checks) - Watch the value of health checks
+- [`event`](#event) - Watch for custom user events
+
+### Type: key ((#key))
+
+The "key" watch type is used to watch a specific key in the KV store.
+It requires that the `key` parameter be specified.
+
+This maps to the `/v1/kv/` API internally.
+
+Here is an example configuration:
+
+
+
+```hcl
+{
+ type = "key"
+ key = "foo/bar/baz"
+ args = ["/usr/bin/my-service-handler.sh", "-redis"]
+}
+```
+
+```json
+{
+ "type": "key",
+ "key": "foo/bar/baz",
+ "args": ["/usr/bin/my-service-handler.sh", "-redis"]
+}
+```
+
+
+
+Or, using the watch command:
+
+```shell-session
+$ consul watch -type=key -key=foo/bar/baz /usr/bin/my-key-handler.sh
+```
+
+An example of the output of this command:
+
+```json
+{
+ "Key": "foo/bar/baz",
+ "CreateIndex": 1793,
+ "ModifyIndex": 1793,
+ "LockIndex": 0,
+ "Flags": 0,
+ "Value": "aGV5",
+ "Session": ""
+}
+```
+
+### Type: keyprefix ((#keyprefix))
+
+The `keyprefix` watch type is used to watch a prefix of keys in the KV store.
+It requires that the `prefix` parameter be specified. This watch
+returns _all_ keys matching the prefix whenever _any_ key matching the prefix
+changes.
+
+This maps to the `/v1/kv/` API internally.
+
+Here is an example configuration:
+
+
+
+```hcl
+{
+ type = "keyprefix"
+ prefix = "foo/"
+ args = ["/usr/bin/my-prefix-handler.sh", "-redis"]
+}
+```
+
+```json
+{
+ "type": "keyprefix",
+ "prefix": "foo/",
+ "args": ["/usr/bin/my-prefix-handler.sh", "-redis"]
+}
+```
+
+
+
+Or, using the watch command:
+
+```shell-session
+$ consul watch -type=keyprefix -prefix=foo/ /usr/bin/my-prefix-handler.sh
+```
+
+An example of the output of this command:
+
+```json
+[
+ {
+ "Key": "foo/bar",
+ "CreateIndex": 1796,
+ "ModifyIndex": 1796,
+ "LockIndex": 0,
+ "Flags": 0,
+ "Value": "TU9BUg==",
+ "Session": ""
+ },
+ {
+ "Key": "foo/baz",
+ "CreateIndex": 1795,
+ "ModifyIndex": 1795,
+ "LockIndex": 0,
+ "Flags": 0,
+ "Value": "YXNkZg==",
+ "Session": ""
+ },
+ {
+ "Key": "foo/test",
+ "CreateIndex": 1793,
+ "ModifyIndex": 1793,
+ "LockIndex": 0,
+ "Flags": 0,
+ "Value": "aGV5",
+ "Session": ""
+ }
+]
+```
+
+### Type: services ((#services))
+
+The "services" watch type is used to watch the list of available
+services. It has no parameters.
+
+This maps to the `/v1/catalog/services` API internally.
+
+Below is an example configuration:
+
+
+
+```hcl
+{
+ type = "services"
+ args = ["/usr/bin/my-services-handler.sh"]
+}
+```
+
+```json
+{
+ "type": "services",
+ "args": ["/usr/bin/my-services-handler.sh"]
+}
+```
+
+
+
+Or, using the watch command:
+
+```shell-session
+$ consul watch -type=services /usr/bin/my-services-handler.sh
+```
+
+An example of the output of this command:
+
+```json
+{
+ "consul": [],
+ "redis": [],
+ "web": []
+}
+```
+
+### Type: nodes ((#nodes))
+
+The "nodes" watch type is used to watch the list of available
+nodes. It has no parameters.
+
+This maps to the `/v1/catalog/nodes` API internally.
+
+Below is an example configuration:
+
+
+
+```hcl
+{
+ type = "nodes"
+ args = ["/usr/bin/my-nodes-handler.sh"]
+}
+```
+
+```json
+{
+ "type": "nodes",
+ "args": ["/usr/bin/my-nodes-handler.sh"]
+}
+```
+
+
+
+Or, using the watch command:
+
+```shell-session
+$ consul watch -type=nodes /usr/bin/my-nodes-handler.sh
+```
+
+An example of the output of this command:
+
+```json
+[
+ {
+ "ID": "8d3088b5-ce7d-0b94-f185-ae70c3445642",
+ "Node": "nyc1-consul-1",
+ "Address": "192.0.2.10",
+ "Datacenter": "dc1",
+ "TaggedAddresses": null,
+ "Meta": null,
+ "CreateIndex": 23792324,
+ "ModifyIndex": 23792324
+ },
+ {
+ "ID": "1edb564e-65ee-9e60-5e8a-83eae4637357",
+ "Node": "nyc1-worker-1",
+ "Address": "192.0.2.20",
+ "Datacenter": "dc1",
+ "TaggedAddresses": {
+ "lan": "192.0.2.20",
+ "lan_ipv4": "192.0.2.20",
+ "wan": "192.0.2.20",
+ "wan_ipv4": "192.0.2.20"
+ },
+ "Meta": {
+ "consul-network-segment": "",
+ "host-ip": "192.0.2.20",
+ "pod-name": "hashicorp-consul-q7nth"
+ },
+ "CreateIndex": 23792336,
+ "ModifyIndex": 23792338
+ }
+]
+```
+
+### Type: service ((#service))
+
+The "service" watch type is used to monitor the providers
+of a single service. It requires the `service` parameter
+and optionally takes the parameters `tag` and
+`passingonly`. The `tag` parameter will filter by one or more tags.
+It may be either a single string value or a slice of strings.
+The `passingonly` parameter is a boolean that will filter to only the
+instances passing all health checks.
+
+This maps to the `/v1/health/service` API internally.
+
+Here is an example configuration with a single tag:
+
+
+
+```hcl
+{
+ type = "service"
+ service = "redis"
+ args = ["/usr/bin/my-service-handler.sh", "-redis"]
+ tag = "bar"
+}
+```
+
+```json
+{
+ "type": "service",
+ "service": "redis",
+ "args": ["/usr/bin/my-service-handler.sh", "-redis"],
+ "tag": "bar"
+}
+```
+
+
+
+Here is an example configuration with multiple tags:
+
+
+
+```hcl
+{
+ type = "service"
+ service = "redis"
+ args = ["/usr/bin/my-service-handler.sh", "-redis"]
+ tag = ["bar", "foo"]
+}
+```
+
+```json
+{
+ "type": "service",
+ "service": "redis",
+ "args": ["/usr/bin/my-service-handler.sh", "-redis"],
+ "tag": ["bar", "foo"]
+}
+```
+
+
+
+Or, using the watch command:
+
+Single tag:
+
+```shell-session
+$ consul watch -type=service -service=redis -tag=bar /usr/bin/my-service-handler.sh
+```
+
+Multiple tags:
+
+```shell-session
+$ consul watch -type=service -service=redis -tag=bar -tag=foo /usr/bin/my-service-handler.sh
+```
+
+An example of the output of this command:
+
+```json
+[
+ {
+ "Node": {
+ "ID": "f013522f-aaa2-8fc6-c8ac-c84cb8a56405",
+ "Node": "hashicorp-consul-server-1",
+ "Address": "192.0.2.50",
+ "Datacenter": "dc1",
+ "TaggedAddresses": null,
+ "Meta": null,
+ "CreateIndex": 23785783,
+ "ModifyIndex": 23785783
+ },
+ "Service": {
+ "ID": "redis",
+ "Service": "redis",
+ "Tags": [],
+ "Meta": null,
+ "Port": 6379,
+ "Address": "",
+ "Weights": {
+ "Passing": 1,
+ "Warning": 1
+ },
+ "EnableTagOverride": false,
+ "CreateIndex": 23785794,
+ "ModifyIndex": 23785794,
+ "Proxy": {
+ "MeshGateway": {},
+ "Expose": {}
+ },
+ "Connect": {}
+ },
+ "Checks": [
+ {
+ "Node": "hashicorp-consul-server-1",
+ "CheckID": "serfHealth",
+ "Name": "Serf Health Status",
+ "Status": "passing",
+ "Notes": "",
+ "Output": "Agent alive and reachable",
+ "ServiceID": "",
+ "ServiceName": "",
+ "ServiceTags": [],
+ "Type": "",
+ "Definition": {
+ "Interval": "0s",
+ "Timeout": "0s",
+ "DeregisterCriticalServiceAfter": "0s",
+ "HTTP": "",
+ "Header": null,
+ "Method": "",
+ "Body": "",
+ "TLSServerName": "",
+ "TLSSkipVerify": false,
+ "TCP": "",
+ "TCPUseTLS": false,
+ "GRPC": "",
+ "GRPCUseTLS": false
+ },
+ "CreateIndex": 23785783,
+ "ModifyIndex": 23791503
+ }
+ ]
+ }
+]
+```
+
+### Type: checks ((#checks))
+
+The "checks" watch type is used to monitor the checks of a given
+service or those in a specific state. It optionally takes the `service`
+parameter to filter to a specific service or the `state` parameter to
+filter to a specific state. By default, it will watch all checks.
+
+This maps to the `/v1/health/state/` API if monitoring by state
+or `/v1/health/checks/` if monitoring by service.
+
+Here is an example configuration for monitoring by state:
+
+
+
+```hcl
+{
+ type = "checks"
+ state = "passing"
+ args = ["/usr/bin/my-check-handler.sh", "-passing"]
+}
+```
+
+```json
+{
+ "type": "checks",
+ "state": "passing",
+ "args": ["/usr/bin/my-check-handler.sh", "-passing"]
+}
+```
+
+
+
+Here is an example configuration for monitoring by service:
+
+
+
+```hcl
+{
+ type = "checks"
+ service = "redis"
+ args = ["/usr/bin/my-check-handler.sh", "-redis"]
+}
+```
+
+```json
+{
+ "type": "checks",
+ "service": "redis",
+ "args": ["/usr/bin/my-check-handler.sh", "-redis"]
+}
+```
+
+
+
+Or, using the watch command:
+
+State:
+
+```shell-session
+$ consul watch -type=checks -state=passing /usr/bin/my-check-handler.sh -passing
+```
+
+Service:
+
+```shell-session
+$ consul watch -type=checks -service=redis /usr/bin/my-check-handler.sh -redis
+```
+
+An example of the output of this command:
+
+```json
+[
+ {
+ "Node": "foobar",
+ "CheckID": "service:redis",
+ "Name": "Service 'redis' check",
+ "Status": "passing",
+ "Notes": "",
+ "Output": "",
+ "ServiceID": "redis",
+ "ServiceName": "redis"
+ }
+]
+```
+
+### Type: event ((#event))
+
+The "event" watch type is used to monitor for custom user
+events. These are fired using the [consul event](/consul/commands/event) command.
+It takes only a single optional `name` parameter which restricts
+the watch to only events with the given name.
+
+This maps to the `/v1/event/list` API internally.
+
+Here is an example configuration:
+
+
+
+```hcl
+{
+ type = "event"
+ name = "web-deploy"
+ args = ["/usr/bin/my-event-handler.sh", "-web-deploy"]
+}
+```
+
+```json
+{
+ "type": "event",
+ "name": "web-deploy",
+ "args": ["/usr/bin/my-event-handler.sh", "-web-deploy"]
+}
+```
+
+
+
+Or, using the watch command:
+
+```shell-session
+$ consul watch -type=event -name=web-deploy /usr/bin/my-event-handler.sh -web-deploy
+```
+
+An example of the output of this command:
+
+```json
+[
+ {
+ "ID": "f07f3fcc-4b7d-3a7c-6d1e-cf414039fcee",
+ "Name": "web-deploy",
+ "Payload": "MTYwOTAzMA==",
+ "NodeFilter": "",
+ "ServiceFilter": "",
+ "TagFilter": "",
+ "Version": 1,
+ "LTime": 18
+ }
+]
+```
+
+To fire a new `web-deploy` event the following could be used:
+
+```shell-session
+$ consul event -name=web-deploy 1609030
+```
diff --git a/website/content/docs/concept/catalog.mdx b/website/content/docs/concept/catalog.mdx
new file mode 100644
index 000000000000..07162b3d4cca
--- /dev/null
+++ b/website/content/docs/concept/catalog.mdx
@@ -0,0 +1,39 @@
+---
+layout: docs
+page_title: Consul catalog
+description: Learn about version 1 of the Consul catalog, including what Consul servers record when they register a service.
+---
+
+# Consul catalog
+
+This topic provides conceptual information about the Consul catalog API. The catalog tracks registered services and their locations for both service discovery and service mesh use cases.
+
+For more information about the information returned when querying the catalog, including filtering options when querying the catalog for a list of nodes, services, or gateways, refer to the [`/catalog` endpoint reference in the HTTP API documentation](/consul/api-docs/catalog).
+
+## Introduction
+
+Consul tracks information about registered services through its catalog API. This API records user-defined information about the external services, such as their partitions and required health checks. It also records information that Consul assigns for its own operations, such as an ID for each service instance and the [Raft indices](/consul/docs/concept/consensus) when the instance is registered and modified.
+
+### v2 Catalog
+
+Consul introduced an experimental v2 Catalog API in v1.17.0. This API supported multi-port Service configurations on Kubernetes, and it was made available for testing and development purposes. The v2 catalog and its support for multiport Kubernetes Services were deprecated in the v1.19.0 release.
+
+## Catalog structure
+
+When Consul registers a service instance using the v1 catalog API, it records the following information about each instance:
+
+| v1 Catalog field | Description | Source |
+| :--------------- | :---------- | :----- |
+| ID | A unique identifier for a service instance. | Defined by user in [service definition](/consul/docs/reference/service#id). |
+| Node | The connection point where the service is available. | On VMs, defined by user.
On Kubernetes, computed by Consul according to [Kubernetes Nodes](https://kubernetes.io/docs/concepts/architecture/nodes/). |
+| Address | The registered address of the service instance. | Defined by user in [service definition](/consul/docs/reference/service#address). |
+| Tagged Addresses | User-defined labels for addresses. | Defined by user in [service definition](/consul/docs/reference/service#tagged_addresses). |
+| NodeMeta | User-defined metadata about the node. | Defined by user |
+| Datacenter | The name of the datacenter the service is registered in. | Defined by user |
+| Service | The name of the service Consul registers the service instance under. | Defined by user |
+| Agent Check | The health checks defined for a service instance managed by a Consul client agent. | Computed by Consul |
+| Health Checks | The health checks defined for the service. Refer to [define health checks](/consul/docs/register/health-check/vm) for more information. | Defined by user |
+| Partition | The name of the admin partition the service is registered in. Refer to [admin partitions](/consul/docs/multi-tenant/admin-partition) for more information. | Defined by user |
+| Locality | Region and availability zone of the service. Refer to [`locality`](/consul/docs/reference/agent/configuration-file/service-mesh#locality) for more information. | Defined by user |
+
+Depending on the configuration entries or custom resource definitions you apply to your Consul installation, additional information such as [proxy default behavior](/consul/docs/reference/config-entry/proxy-defaults) is automatically recorded to the catalog for services. You can return this information using the [`/catalog` HTTP API endpoint](/consul/api-docs/catalog).
diff --git a/website/content/docs/concept/consensus.mdx b/website/content/docs/concept/consensus.mdx
new file mode 100644
index 000000000000..dffd48a50c4b
--- /dev/null
+++ b/website/content/docs/concept/consensus.mdx
@@ -0,0 +1,135 @@
+---
+layout: docs
+page_title: Consensus
+description: >-
+ Consul ensures a consistent state using the Raft protocol. A quorum, or a majority of server agents with one leader, agree to state changes before committing to the state log. Learn how Raft works in Consul to ensure state consistency and how that state can be read with different consistency modes to balance read latency and consistency.
+---
+
+# Consensus
+
+Consul uses a [consensus protocol]()
+to provide [Consistency (as defined by CAP)](https://en.wikipedia.org/wiki/CAP_theorem).
+The consensus protocol is based on
+["Raft: In search of an Understandable Consensus Algorithm"](https://raft.github.io/raft.pdf).
+For a visual explanation of Raft, see [The Secret Lives of Data](http://thesecretlivesofdata.com/raft).
+
+## Raft Protocol Overview
+
+Raft is a consensus algorithm that is based on
+[Paxos](https://en.wikipedia.org/wiki/Paxos_%28computer_science%29). Compared
+to Paxos, Raft is designed to have fewer states and a simpler, more
+understandable algorithm.
+
+There are a few key terms to know when discussing Raft:
+
+- Log - The primary unit of work in a Raft system is a log entry. The problem
+ of consistency can be decomposed into a _replicated log_. A log is an ordered
+ sequence of entries. Entries includes any cluster change: adding nodes, adding services, new key-value pairs, etc. We consider the log consistent
+ if all members agree on the entries and their order.
+
+- FSM - [Finite State Machine](https://en.wikipedia.org/wiki/Finite-state_machine).
+ An FSM is a collection of finite states with transitions between them. As new logs
+ are applied, the FSM is allowed to transition between states. Application of the
+ same sequence of logs must result in the same state, meaning behavior must be deterministic.
+
+- Peer set - The peer set is the set of all members participating in log replication.
+ For Consul's purposes, all server nodes are in the peer set of the local datacenter.
+
+- Quorum - A quorum is a majority of members from a peer set: for a set of size `N`,
+ quorum requires at least `(N/2)+1` members.
+ For example, if there are 5 members in the peer set, we would need 3 nodes
+ to form a quorum. If a quorum of nodes is unavailable for any reason, the
+ cluster becomes _unavailable_ and no new logs can be committed.
+
+- Committed Entry - An entry is considered _committed_ when it is durably stored
+ on a quorum of nodes. Once an entry is committed it can be applied.
+
+- Leader - At any given time, the peer set elects a single node to be the leader.
+ The leader is responsible for ingesting new log entries, replicating to followers,
+ and managing when an entry is considered committed.
+
+Raft is a complex protocol and will not be covered here in detail (for those who
+desire a more comprehensive treatment, the full specification is available in this
+[paper](https://raft.github.io/raft.pdf)).
+We will, however, attempt to provide a high level description which may be useful
+for building a mental model.
+
+Raft nodes are always in one of three states: follower, candidate, or leader. All
+nodes initially start out as a follower. In this state, nodes can accept log entries
+from a leader and cast votes. If no entries are received for some time, nodes
+self-promote to the candidate state. In the candidate state, nodes request votes from
+their peers. If a candidate receives a quorum of votes, then it is promoted to a leader.
+The leader must accept new log entries and replicate to all the other followers.
+In addition, if stale reads are not acceptable, all queries must also be performed on
+the leader.
+
+Once a cluster has a leader, it is able to accept new log entries. A client can
+request that a leader append a new log entry (from Raft's perspective, a log entry
+is an opaque binary blob). The leader then writes the entry to durable storage and
+attempts to replicate to a quorum of followers. Once the log entry is considered
+_committed_, it can be _applied_ to a finite state machine. The finite state machine
+is application specific; in Consul's case, we use
+[MemDB](https://github.com/hashicorp/go-memdb) to maintain cluster state. Consul's writes
+block until it is both _committed_ and _applied_. This achieves read after write semantics
+when used with the [consistent](/consul/api-docs/features/consistency#consistent) mode for queries.
+
+Obviously, it would be undesirable to allow a replicated log to grow in an unbounded
+fashion. Raft provides a mechanism by which the current state is snapshotted and the
+log is compacted. Because of the FSM abstraction, restoring the state of the FSM must
+result in the same state as a replay of old logs. This allows Raft to capture the FSM
+state at a point in time and then remove all the logs that were used to reach that
+state. This is performed automatically without user intervention and prevents unbounded
+disk usage while also minimizing time spent replaying logs. One of the advantages of
+using MemDB is that it allows Consul to continue accepting new transactions even while
+old state is being snapshotted, preventing any availability issues.
+
+Consensus is fault-tolerant up to the point where quorum is available.
+If a quorum of nodes is unavailable, it is impossible to process log entries or reason
+about peer membership. For example, suppose there are only 2 peers: A and B. The quorum
+size is also 2, meaning both nodes must agree to commit a log entry. If either A or B
+fails, it is now impossible to reach quorum. This means the cluster is unable to add
+or remove a node or to commit any additional log entries. This results in
+_unavailability_. At this point, manual intervention would be required to remove
+either A or B and to restart the remaining node in bootstrap mode.
+
+A Raft cluster of 3 nodes can tolerate a single node failure while a cluster
+of 5 can tolerate 2 node failures. The recommended configuration is to either
+run 3 or 5 Consul servers per datacenter. This maximizes availability without
+greatly sacrificing performance. The [deployment table](/concept/reliability#deployment-size)
+summarizes the potential cluster size options and the fault tolerance of each.
+
+In terms of performance, Raft is comparable to Paxos. Assuming stable leadership,
+committing a log entry requires a single round trip to half of the cluster.
+Thus, performance is bound by disk I/O and network latency. Although Consul is
+not designed to be a high-throughput write system, it should handle on the order
+of hundreds to thousands of transactions per second depending on network and
+hardware configuration.
+
+## Raft in Consul
+
+Only Consul server nodes participate in Raft and are part of the peer set. All
+client nodes forward requests to servers. Part of the reason for this design is
+that, as more members are added to the peer set, the size of the quorum also increases.
+This introduces performance problems as you may be waiting for hundreds of machines
+to agree on an entry instead of a handful.
+
+When getting started, a single Consul server is put into "bootstrap" mode. This mode
+allows it to self-elect as a leader. Once a leader is elected, other servers can be
+added to the peer set in a way that preserves consistency and safety. Eventually,
+once the first few servers are added, bootstrap mode can be disabled. See [this
+document](/consul/docs/deploy/server/vm/bootstrap) for more details.
+
+Since all servers participate as part of the peer set, they all know the current
+leader. When an RPC request arrives at a non-leader server, the request is
+forwarded to the leader. If the RPC is a _query_ type, meaning it is read-only,
+the leader generates the result based on the current state of the FSM. If
+the RPC is a _transaction_ type, meaning it modifies state, the leader
+generates a new log entry and applies it using Raft. Once the log entry is committed
+and applied to the FSM, the transaction is complete.
+
+Because of the nature of Raft's replication, performance is sensitive to network
+latency. For this reason, each datacenter elects an independent leader and maintains
+a disjoint peer set. Data is partitioned by datacenter, so each leader is responsible
+only for data in their datacenter. When a request is received for a remote datacenter,
+the request is forwarded to the correct leader. This design allows for lower latency
+transactions and higher availability without sacrificing consistency.
diff --git a/website/content/docs/concept/consistency.mdx b/website/content/docs/concept/consistency.mdx
new file mode 100644
index 000000000000..61ee3ac0f902
--- /dev/null
+++ b/website/content/docs/concept/consistency.mdx
@@ -0,0 +1,234 @@
+---
+layout: docs
+page_title: Consistency
+description: >-
+ Anti-entropy keeps distributed systems consistent. Learn how Consul uses an anti-entropy mechanism to periodically sync agent states with the service catalog to prevent the catalog from becoming stale. Learn about the Jepsen testing performed on Consul to ensure it gracefully recovers from partitions and maintains consistent state.
+---
+
+# Consistency
+
+Consul uses an advanced method of maintaining service and health information.
+This page details how services and checks are registered, how the catalog is
+populated, and how health status information is updated as it changes.
+
+## Anti-Entropy
+
+Entropy is the tendency of systems to become increasingly disordered. Consul's
+anti-entropy mechanisms are designed to counter this tendency, to keep the
+state of the cluster ordered even through failures of its components.
+
+Consul has a clear separation between the global service catalog and the agent's
+local state. The anti-entropy mechanism reconciles these two
+views of the world: anti-entropy is a synchronization of the local agent state and
+the catalog. For example, when a user registers a new service or check with the
+agent, the agent in turn notifies the catalog that this new check exists.
+Similarly, when a check is deleted from the agent, it is consequently removed from
+the catalog as well.
+
+Anti-entropy is also used to update availability information. As agents run
+their health checks, their status may change, in which case their new status
+is synced to the catalog. Using this information, the catalog can respond
+intelligently to queries about its nodes and services based on their
+availability.
+
+During this synchronization, the catalog is also checked for correctness. If
+any services or checks exist in the catalog that the agent is not aware of, they
+will be automatically removed to make the catalog reflect the proper set of
+services and health information for that agent. Consul treats the state of the
+agent as authoritative; if there are any differences between the agent
+and catalog view, the agent-local view will always be used.
+
+### Periodic Synchronization
+
+In addition to running when changes to the agent occur, anti-entropy is also a
+long-running process which periodically wakes up to sync service and check
+status to the catalog. This ensures that the catalog closely matches the agent's
+true state. This also allows Consul to re-populate the service catalog even in
+the case of complete data loss.
+
+To avoid saturation, the amount of time between periodic anti-entropy runs will
+vary based on cluster size. The table below defines the relationship between
+cluster size and sync interval:
+
+| Cluster Size | Periodic Sync Interval |
+| ------------ | ---------------------- |
+| 1 - 128 | 1 minute |
+| 129 - 256 | 2 minutes |
+| 257 - 512 | 3 minutes |
+| 513 - 1024 | 4 minutes |
+| ... | ... |
+
+The intervals above are approximate. Each Consul agent will choose a randomly
+staggered start time within the interval window to avoid a thundering herd.
+
+### Best-effort sync
+
+Anti-entropy can fail in a number of cases, including misconfiguration of the
+agent or its operating environment, I/O problems (full disk, filesystem
+permission, etc.), networking problems (agent cannot communicate with server),
+among others. Because of this, the agent attempts to sync in best-effort
+fashion.
+
+If an error is encountered during an anti-entropy run, the error is logged and
+the agent continues to run. The anti-entropy mechanism is run periodically to
+automatically recover from these types of transient failures.
+
+### Enable Tag Override
+
+Synchronization of service registration can be partially modified to
+allow external agents to change the tags for a service. This can be
+useful in situations where an external monitoring service needs to be
+the source of truth for tag information. For example, the Redis
+database and its monitoring service Redis Sentinel have this kind of
+relationship. Redis instances are responsible for much of their
+configuration, but Sentinels determine whether the Redis instance is a
+primary or a secondary. Enable the
+[`enable_tag_override`](/consul/docs/reference/service#enable_tag_override) parameter in your service definition file to tell the Consul agent
+on the node where the Redis database is running to bypass tags during anti-entropy synchronization. Refer to
+[Modify anti-entropy synchronization](/consul/docs/services/usage/define-services#modify-anti-entropy-synchronization) for additional information.
+
+## Consistency Modes
+
+Although all writes to the replicated log go through Raft, reads are more
+flexible. To support various trade-offs that developers may want, Consul
+supports 3 different consistency modes for reads.
+
+The three read modes are:
+
+- `default` - Raft makes use of leader leasing, providing a time window
+ in which the leader assumes its role is stable. However, if a leader
+ is partitioned from the remaining peers, a new leader may be elected
+ while the old leader is holding the lease. This means there are 2 leader
+ nodes. There is no risk of a split-brain since the old leader will be
+ unable to commit new logs. However, if the old leader services any reads,
+ the values are potentially stale. The default consistency mode relies only
+ on leader leasing, exposing clients to potentially stale values. We make
+ this trade-off because reads are fast, usually strongly consistent, and
+ only stale in a hard-to-trigger situation. The time window of stale reads
+ is also bounded since the leader will step down due to the partition.
+
+- `consistent` - This mode is strongly consistent without caveats. It requires
+ that a leader verify with a quorum of peers that it is still leader. This
+ introduces an additional round-trip to all server nodes. The trade-off is
+ always consistent reads but increased latency due to the extra round trip.
+
+- `stale` - This mode allows any server to service the read regardless of whether
+ it is the leader. This means reads can be arbitrarily stale but are generally
+ within 50 milliseconds of the leader. The trade-off is very fast and scalable
+ reads but with stale values. This mode allows reads without a leader meaning
+ a cluster that is unavailable will still be able to respond.
+
+For more documentation about using these various modes, see the
+[HTTP API](/consul/api-docs/features/consistency).
+
+## Jepsen Testing Results
+
+[Jepsen](http://aphyr.com/posts/281-call-me-maybe-carly-rae-jepsen-and-the-perils-of-network-partitions)
+is a tool, written by Kyle Kingsbury, designed to test the partition
+tolerance of distributed systems. It creates network partitions while fuzzing
+the system with random operations. The results are analyzed to see if the system
+violates any of the consistency properties it claims to have.
+
+As part of our Consul testing, we ran a Jepsen test to determine if
+any consistency issues could be uncovered. In our testing, Consul
+gracefully recovered from partitions without introducing any consistency
+issues.
+
+### Running the tests
+
+At the moment, testing with Jepsen is rather complex as it requires
+setting up multiple virtual machines, SSH keys, DNS configuration,
+and a working Clojure environment. We hope to contribute our Consul
+testing code upstream and to provide a Vagrant environment for Jepsen
+testing soon.
+
+### Output
+
+Below is the output captured from Jepsen. We ran Jepsen multiple times,
+and it passed each time. This output is only representative of a single
+run and has been edited for length. Please reach out on [Consul's Discuss](https://discuss.hashicorp.com/c/consul)
+if you would like to reproduce the Jepsen results.
+
+
+
+```shell-session
+$ lein test :only jepsen.system.consul-test
+
+lein test jepsen.system.consul-test
+INFO jepsen.os.debian - :n5 setting up debian
+INFO jepsen.os.debian - :n3 setting up debian
+INFO jepsen.os.debian - :n4 setting up debian
+INFO jepsen.os.debian - :n1 setting up debian
+INFO jepsen.os.debian - :n2 setting up debian
+INFO jepsen.os.debian - :n4 debian set up
+INFO jepsen.os.debian - :n5 debian set up
+INFO jepsen.os.debian - :n3 debian set up
+INFO jepsen.os.debian - :n1 debian set up
+INFO jepsen.os.debian - :n2 debian set up
+INFO jepsen.system.consul - :n1 consul nuked
+INFO jepsen.system.consul - :n4 consul nuked
+INFO jepsen.system.consul - :n5 consul nuked
+INFO jepsen.system.consul - :n3 consul nuked
+INFO jepsen.system.consul - :n2 consul nuked
+INFO jepsen.system.consul - Running nodes: {:n1 false, :n2 false, :n3 false, :n4 false, :n5 false}
+INFO jepsen.system.consul - :n2 consul nuked
+INFO jepsen.system.consul - :n3 consul nuked
+INFO jepsen.system.consul - :n4 consul nuked
+INFO jepsen.system.consul - :n5 consul nuked
+INFO jepsen.system.consul - :n1 consul nuked
+INFO jepsen.system.consul - :n1 starting consul
+INFO jepsen.system.consul - :n2 starting consul
+INFO jepsen.system.consul - :n4 starting consul
+INFO jepsen.system.consul - :n5 starting consul
+INFO jepsen.system.consul - :n3 starting consul
+INFO jepsen.system.consul - :n3 consul ready
+INFO jepsen.system.consul - :n2 consul ready
+INFO jepsen.system.consul - Running nodes: {:n1 true, :n2 true, :n3 true, :n4 true, :n5 true}
+INFO jepsen.system.consul - :n5 consul ready
+INFO jepsen.system.consul - :n1 consul ready
+INFO jepsen.system.consul - :n4 consul ready
+INFO jepsen.core - Worker 0 starting
+INFO jepsen.core - Worker 2 starting
+INFO jepsen.core - Worker 1 starting
+INFO jepsen.core - Worker 3 starting
+INFO jepsen.core - Worker 4 starting
+INFO jepsen.util - 2 :invoke :read nil
+INFO jepsen.util - 3 :invoke :cas [4 4]
+INFO jepsen.util - 0 :invoke :write 4
+INFO jepsen.util - 1 :invoke :write 1
+INFO jepsen.util - 4 :invoke :cas [4 0]
+INFO jepsen.util - 2 :ok :read nil
+INFO jepsen.util - 4 :fail :cas [4 0]
+(Log Truncated...)
+INFO jepsen.util - 4 :invoke :cas [3 3]
+INFO jepsen.util - 4 :fail :cas [3 3]
+INFO jepsen.util - :nemesis :info :stop nil
+INFO jepsen.util - :nemesis :info :stop "fully connected"
+INFO jepsen.util - 0 :fail :read nil
+INFO jepsen.util - 1 :fail :write 0
+INFO jepsen.util - :nemesis :info :stop nil
+INFO jepsen.util - :nemesis :info :stop "fully connected"
+INFO jepsen.core - nemesis done
+INFO jepsen.core - Worker 3 done
+INFO jepsen.util - 1 :invoke :read nil
+INFO jepsen.core - Worker 2 done
+INFO jepsen.core - Worker 4 done
+INFO jepsen.core - Worker 0 done
+INFO jepsen.util - 1 :ok :read 3
+INFO jepsen.core - Worker 1 done
+INFO jepsen.core - Run complete, writing
+INFO jepsen.core - Analyzing
+(Log Truncated...)
+INFO jepsen.core - Analysis complete
+INFO jepsen.system.consul - :n3 consul nuked
+INFO jepsen.system.consul - :n2 consul nuked
+INFO jepsen.system.consul - :n4 consul nuked
+INFO jepsen.system.consul - :n1 consul nuked
+INFO jepsen.system.consul - :n5 consul nuked
+1964 element history linearizable. :D
+
+Ran 1 tests containing 1 assertions.
+0 failures, 0 errors.
+```
+
+
diff --git a/website/content/docs/concept/gossip.mdx b/website/content/docs/concept/gossip.mdx
new file mode 100644
index 000000000000..7707cc798956
--- /dev/null
+++ b/website/content/docs/concept/gossip.mdx
@@ -0,0 +1,56 @@
+---
+layout: docs
+page_title: Gossip Protocol | Serf
+description: >-
+ Consul agents manage membership in datacenters and WAN federations using the Serf protocol. Learn about the differences between LAN and WAN gossip pools and how `serfHealth` affects health checks.
+---
+
+# Gossip Protocol
+
+Consul uses a [gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol)
+to manage membership and broadcast messages to the cluster. The protocol, membership management, and message broadcasting are provided
+through the [Serf library](https://github.com/hashicorp/serf/). The gossip protocol
+used by Serf is based on a modified version of the
+[SWIM (Scalable Weakly-consistent Infection-style Process Group Membership)](https://www.cs.cornell.edu/projects/Quicksilver/public_pdfs/SWIM.pdf) protocol.
+Refer to the [Serf documentation](https://github.com/hashicorp/serf/blob/master/docs/internals/gossip.html.markdown) for additional information about the gossip protocol.
+
+## Gossip in Consul
+
+Consul uses a LAN gossip pool and a WAN gossip pool to perform different functions. The pools
+are able to perform their functions by leveraging an embedded [Serf](https://github.com/hashicorp/serf/)
+library. The library is abstracted and masked by Consul to simplify the user experience,
+but developers may find it useful to understand how the library is leveraged.
+
+### LAN Gossip Pool
+
+Each datacenter that Consul operates in has a LAN gossip pool containing all members
+of the datacenter (clients _and_ servers). Membership information provided by the
+LAN pool allows clients to automatically discover servers, reducing the amount of
+configuration needed. Failure detection is also distributed and shared by the entire cluster,
+instead of concentrated on a few servers. Lastly, the gossip pool allows for fast and
+reliable event broadcasts.
+
+### WAN Gossip Pool
+
+The WAN pool is globally unique. All servers should participate in the WAN pool,
+regardless of datacenter. Membership information provided by the WAN pool allows
+servers to perform cross-datacenter requests. The integrated failure detection
+allows Consul to gracefully handle loss of connectivity--whether the loss is for
+an entire datacenter, or a single server in a remote datacenter.
+
+## Lifeguard Enhancements ((#lifeguard))
+
+SWIM assumes that the local node is healthy, meaning that soft real-time packet
+processing is possible. The assumption may be violated, however, if the local node
+experiences CPU or network exhaustion. In these cases, the `serfHealth` check status
+can flap. This can result in false monitoring alarms, additional telemetry noise, and
+CPU and network resources being wasted as they attempt to diagnose non-existent failures.
+
+Lifeguard completely resolves this issue with novel enhancements to SWIM.
+
+For more details about Lifeguard, please see the
+[Making Gossip More Robust with Lifeguard](https://www.hashicorp.com/blog/making-gossip-more-robust-with-lifeguard/)
+blog post, which provides a high level overview of the HashiCorp Research paper
+[Lifeguard : SWIM-ing with Situational Awareness](https://arxiv.org/abs/1707.00788). The
+[Serf gossip protocol guide](https://github.com/hashicorp/serf/blob/master/docs/internals/gossip.html.markdown#lifeguard-enhancements)
+also provides some lower-level details about the gossip protocol and Lifeguard.
\ No newline at end of file
diff --git a/website/content/docs/concept/reliability.mdx b/website/content/docs/concept/reliability.mdx
new file mode 100644
index 000000000000..c339c277daac
--- /dev/null
+++ b/website/content/docs/concept/reliability.mdx
@@ -0,0 +1,228 @@
+---
+layout: docs
+page_title: Fault Tolerance in Consul
+description: >-
+ Fault tolerance is a system's ability to operate without interruption despite component failure. Learn how a set of Consul servers provide fault tolerance through use of a quorum, and how to further improve control plane resilience through use of infrastructure zones and Enterprise redundancy zones.
+---
+
+# Fault tolerance
+
+You must give careful consideration to reliability in the architecture frameworks that you build. When you build a resilient platform, it minimizes the remediation actions you need to take when a failure occurs. This document explains how to design and operate a resilient Consul cluster, including the methods and features that support this goal.
+
+Consul has many features that operate both locally and remotely that can help you offer a resilient service across multiple datacenters.
+
+## Introduction
+
+Fault tolerance is the ability of a system to continue operating without interruption
+despite the failure of one or more components. In Consul, the number of server agents determines the fault tolerance.
+
+
+Each Consul datacenter depends on a set of Consul voting server agents.
+The voting servers ensure Consul has a consistent, fault-tolerant state
+by requiring a majority of voting servers, known as a quorum, to agree upon any state changes.
+Examples of state changes include: adding or removing services,
+adding or removing nodes, and changes in service or node health status.
+
+Without a quorum, Consul experiences an outage:
+it cannot provide most of its capabilities because they rely on
+the availability of this state information.
+If Consul has an outage, normal operation can be restored by following the
+[Disaster recovery for Consul clusters guide](/consul/tutorials/datacenter-operations/recovery-outage).
+
+If Consul is deployed with 3 servers, the quorum size is 2. The deployment can lose 1
+server and still maintain quorum, so it has a fault tolerance of 1.
+If Consul is instead deployed with 5 servers, the quorum size increases to 3, so
+the fault tolerance increases to 2.
+To learn more about the relationship between the
+number of servers, quorum, and fault tolerance, refer to the
+[consensus protocol documentation](/consul/docs/concept/reliability#deployment-size).
+
+Effectively mitigating your risk is more nuanced than just increasing the fault tolerance
+because the infrastructure costs can outweigh the improved resiliency. You must also consider correlated risks at the infrastructure-level. There are occasions when multiple servers fail at the same time. That means that a single failure could cause a Consul outage, even if your server-level fault tolerance is 2.
+
+Different options for your resilient datacenter present trade-offs between operational complexity, computing cost, and Consul request performance. Consider these factors when designing your resilient architecture.
+
+## Improving fault tolerance
+
+The following sections explore several options for increasing Consul's fault tolerance. For enhanced reliability, we recommend taking a holistic approach by layering these multiple functionalities together.
+
+- Spread servers across infrastructure [availability zones](#availability-zones).
+- Use a [minimum quorum size](#quorum-size) to avoid performance impacts.
+- Use [redundancy zones](#redundancy-zones) to improve fault tolerance.
+- Use [Autopilot](#autopilot) to automatically prune failed servers and maintain quorum size.
+- Use [cluster peering](#cluster-peering) to provide service redundancy.
+
+### Availability zones
+
+The cloud or on-premise infrastructure underlying your [Consul datacenter](/consul/docs/install/glossary#datacenter) can run across multiple availability zones.
+
+An availability zone is meant to share no points of failure with other zones by:
+- Having power, cooling, and networking systems independent from other zones
+- Being physically distant enough from other zones so that large-scale disruptions
+ such as natural disasters (flooding, earthquakes) are very unlikely to affect multiple zones
+
+Availability zones are available in the regions of most cloud providers and in some on-premise installations.
+If possible, spread your Consul voting servers across 3 availability zones
+to protect your Consul datacenter from a single zone-level failure.
+For example, if deploying 5 Consul servers across 3 availability zones, place no more than 2 servers in each zone.
+If one zone fails, at most 2 servers are lost and quorum will be maintained by the 3 remaining servers.
+
+To distribute your Consul servers across availability zones, modify your infrastructure configuration with your infrastructure provider. No change is needed to your Consul server's agent configuration.
+
+Additionally, you should leverage resources that can automatically restore your compute instance,
+such as autoscaling groups, virtual machine scale sets, or compute engine autoscaler.
+Customize autoscaling resources to re-deploy servers into specific availability zones and ensure the desired numbers of servers are available at all times.
+
+### Quorum size
+
+For most production use cases, we recommend using a minimum quorum of either 3 or 5 voting servers,
+yielding a server-level fault tolerance of 1 or 2 respectively.
+
+Even though it would improve fault tolerance,
+adding voting servers beyond 5 is **not recommended** because it decreases Consul's performance—
+it requires Consul to involve more servers in every state change or consistent read.
+
+Consul Enterprise users can use redundancy zones to improve fault tolerance without this performance penalty.
+
+### Redundancy zones
+
+Use Consul Enterprise [redundancy zones](/consul/docs/manage/scale/redundancy-zone) to improve fault tolerance without the performance penalty of increasing the number of voting servers.
+
+
+
+
+Each redundancy zone should be assigned 2 or more Consul servers.
+If all servers are healthy, only one server per redundancy zone will be an active voter;
+all other servers will be backup voters.
+If a zone's voter is lost, it will be replaced by:
+- A backup voter within the same zone, if any. Otherwise,
+- A backup voter within another zone, if any.
+
+Consul can replace lost voters with backup voters within 30 seconds in most cases.
+Because this replacement process is not instantaneous,
+redundancy zones do not improve immediate fault tolerance—
+the number of healthy voting servers that can fail at once without causing an outage.
+Instead, redundancy zones improve optimistic fault tolerance:
+the number of healthy active and back-up voting servers that can fail gradually without causing an outage.
+
+The relationship between these two types of fault tolerance is:
+
+_Optimistic fault tolerance = immediate fault tolerance + the number of healthy backup voters_
+
+For example, consider a Consul datacenter with 3 redundancy zones and 2 servers per zone.
+There will be 3 voting servers (1 per zone), meaning a quorum size of 2 and an immediate fault tolerance of 1.
+There will also be 3 backup voters (1 per zone), each of which increases the optimistic fault tolerance.
+Therefore, the optimistic fault tolerance is 4.
+This provides performance similar to a 3 server setup with fault tolerance similar to a 7 server setup.
+
+We recommend associating each Consul redundancy zone with an infrastructure availability zone
+to also gain the infrastructure-level fault tolerance benefits provided by availability zones.
+However, Consul redundancy zones can be used even without the backing of infrastructure availability zones.
+
+For more information on redundancy zones, refer to:
+- [Redundancy zone documentation](/consul/docs/manage/scale/redundancy-zone)
+ for a more detailed explanation
+- [Redundancy zone tutorial](/consul/tutorials/enterprise/redundancy-zones)
+ to learn how to use them
+
+### Autopilot
+
+Autopilot is a set of functions that introduce servers to a cluster, cleans up dead servers, and monitors the state of the Raft protocol in the Consul cluster.
+
+When you enable Autopilot's dead server cleanup, Autopilot marks failed servers as `Left` and removes them from the Raft peer set to prevent them from interfering with the quorum size. Autopilot does that as soon as a replacement Consul server comes online. This behavior is beneficial when server nodes failed and have been redeployed but Consul considers them as new nodes because their IP address and hostnames have changed. Autopilot keeps the cluster peer set size correct and the quorum requirement simple.
+
+To illustrate the Autopilot advantage, consider a scenario where Consul has a cluster of five server nodes. The quorum is three, which means the cluster can lose two server nodes before the cluster fails. The following events happen:
+
+1. Two server nodes fail.
+1. Two replacement nodes are deployed with new hostnames and IPs.
+1. The two replacement nodes rejoin the Consul cluster.
+1. Consul treats the replacement nodes as extra nodes, unrelated to the previously failed nodes.
+
+_With Autopilot not enabled_, the following happens:
+
+1. Consul does not immediately clean up the failed nodes when the replacement nodes join the cluster.
+1. The cluster now has the three surviving nodes, the two failed nodes, and the two replacement nodes, for a total of seven nodes.
+ - The quorum is increased to four, which means the cluster can only afford to lose one node until after the two failed nodes are deleted in seventy-two hours.
+ - The redundancy level has decreased from its initial state.
+
+_With Autopilot enabled_, the following happens:
+
+1. Consul immediately cleans up the failed nodes when the replacement nodes join the cluster.
+1. The cluster now has the three surviving nodes and the two replacement nodes, for a total of five nodes.
+ - The quorum stays at three, which means the cluster can afford to lose two nodes before it fails.
+ - The redundancy level remains the same.
+
+### Cluster peering
+
+Linking multiple Consul clusters together to provide service redundancy is the most effective method to prevent disruption from failure. This method is enhanced when you design individual Consul clusters with resilience in mind. Consul clusters interconnect in two ways: WAN federation and cluster peering. We recommend using cluster peering whenever possible.
+
+Cluster peering lets you connect two or more independent Consul clusters using mesh gateways, so that services can communicate between non-identical partitions in different datacenters.
+
+
+
+
+Cluster peering is the preferred way to interconnect clusters because it is operationally easier to configure and manage than WAN federation. Cluster peering communication between two datacenters runs only on one port on the related Consul mesh gateway, which makes it operationally easy to expose for routing purposes.
+
+When you use cluster peering to connect admin partitions between datacenters, use Consul’s dynamic traffic management functionalities `service-splitter`, `service-router`, and `service-resolver` to configure your service mesh to automatically forward or failover service traffic between peer clusters. Consul can then manage the traffic intended for the service and do [failover](/consul/docs/reference/config-entry/service-resolver#spec-failover), [load-balancing](/consul/docs/reference/config-entry/service-resolver#spec-loadbalancer), or [redirection](/consul/docs/reference/config-entry/service-resolver#spec-redirect).
+
+Cluster peering also extends service discovery across different datacenters independent of service mesh functions. After you peer datacenters, you can refer to services between datacenters with `.virtual.peer.consul` in Consul DNS. For Consul Enterprise, your query string may need to include the namespace, partition, or both. Refer to the [Consul DNS documentation](/consul/docs/services/discovery/dns-static-lookups#service-virtual-ip-lookups) for details on building virtual service lookups.
+
+For more information on cluster peering, refer to:
+- [Cluster peering documentation](/consul/docs/east-west/cluster-peering)
+ for a more detailed explanation
+- [Cluster peering tutorial](/consul/tutorials/implement-multi-tenancy/cluster-peering)
+ to learn how to implement cluster peering
+
+## Deployment size
+
+The following table shows quorum size and failure tolerance for various
+cluster sizes. The recommended deployment is either 3 or 5 servers. A single
+server deployment is _**highly**_ discouraged as data loss is inevitable in a
+failure scenario.
+
+
+
+
+
Servers
+
Quorum Size
+
Failure Tolerance
+
+
+
+
+
1
+
1
+
0
+
+
+
2
+
2
+
0
+
+
+
3
+
2
+
1
+
+
+
4
+
3
+
1
+
+
+
5
+
3
+
2
+
+
+
6
+
4
+
2
+
+
+
7
+
4
+
3
+
+
+
\ No newline at end of file
diff --git a/website/content/docs/concepts/service-discovery.mdx b/website/content/docs/concepts/service-discovery.mdx
deleted file mode 100644
index 44c83b74147d..000000000000
--- a/website/content/docs/concepts/service-discovery.mdx
+++ /dev/null
@@ -1,95 +0,0 @@
----
-layout: docs
-page_title: Service Discovery Explained
-description: >-
- Service discovery dynamically tracks and monitors service instances on your network and makes them discoverable through DNS queries. Learn about the benefits of service discovery and how it works.
----
-
-# What is service discovery?
-
-_Service discovery_ helps you discover, track, and monitor the health of services within a network. Service discovery registers and maintains a record of all your services in a _service catalog_. This service catalog acts as a single source of truth that allows your services to query and communicate with each other.
-
-## Benefits of service discovery
-
-Service discovery provides benefits for all organizations, ranging from simplified scalability to improved application resiliency. Some of the benefits of service discovery include:
-
-- Dynamic IP address and port discovery
-- Simplified horizontal service scaling
-- Abstracts discovery logic away from applications
-- Reliable service communication ensured by health checks
-- Load balances requests across healthy service instances
-- Faster deployment times achieved by high-speed discovery
-- Automated service registration and de-registration
-
-## How does service discovery work?
-
-Service discovery uses a service's identity instead of traditional access information (IP address and port). This allows you to dynamically map services and track any changes within a service catalog. Service consumers (users or other services) then use DNS to dynamically retrieve other service's access information from the service catalog. The lifecycle of a service may look like the following:
-
-A service consumer communicates with the "Web" service via a unique Consul DNS entry provided by the service catalog.
-
-
-
-A new instance of the "Web" service registers itself to the service catalog with its IP address and port. As new instances of your services are registered to the service catalog, they will participate in the load balancing pool for handling service consumer requests.
-
-
-
-The service catalog is dynamically updated as new instances of the service are added and legacy or unhealthy service instances are removed. Removed services will no longer participate in the load balancing pool for handling service consumer requests.
-
-
-
-## What is service discovery in microservices?
-
-In a microservices application, the set of active service instances changes frequently across a large, dynamic environment. These service instances rely on a service catalog to retrieve the most up-to-date access information from the respective services. A reliable service catalog is especially important for service discovery in microservices to ensure healthy, scalable, and highly responsive application operation.
-
-## What are the two main types of service discovery?
-
-There are two main service‑discovery patterns: _client-side_ discovery and _server-side_ discovery.
-
-In systems that use client‑side discovery, the service consumer is responsible for determining the access information of available service instances and load balancing requests between them.
-
-1. The service consumer queries the service catalog
-1. The service catalog retrieves and returns all access information
-1. The service consumer selects a healthy downstream service and makes requests directly to it
-
-
-
-In systems that use server‑side discovery, the service consumer uses an intermediary to query the service catalog and make requests to them.
-
-1. The service consumer queries an intermediary (Consul)
-1. The intermediary queries the service catalog and routes requests to the available service instances.
-
-
-
-For modern applications, this discovery method is advantageous because developers can make their applications faster and more lightweight by decoupling and centralizing service discovery logic.
-
-## Service discovery vs load balancing
-
-Service discovery and load balancing share a similarity in distributing requests to back end services, but differ in many important ways.
-
-Traditional load balancers are not designed for rapid registration and de-registration of services, nor are they designed for high-availability. By contrast, service discovery systems use multiple nodes that maintain the service registry state and a peer-to-peer state management system for increased resilience across any type of infrastructure.
-
-For modern, cloud-based applications, service discovery is the preferred method for directing traffic to the right service provider due to its ability to scale and remain resilient, independent of infrastructure.
-
-## How do you implement service discovery?
-
-You can implement service discovery systems across any type of infrastructure, whether it is on-premise or in the cloud. Service discovery is a native feature of many container orchestrators such as Kubernetes or Nomad. There are also platform-agnostic service discovery methods available for non-container workloads such as VMs and serverless technologies. Implementing a resilient service discovery system involves creating a set of servers that maintain and facilitate service registry operations. You can achieve this by installing a service discovery system or using a managed service discovery service.
-
-## What is Consul?
-
-Consul is a service networking solution that lets you automate network configurations, discover services, and enable secure connectivity across any cloud or runtime. With these features, Consul helps you solve the complex networking and security challenges of operating microservices and cloud infrastructure (multi-cloud and hybrid cloud). You can use these features independently or together to achieve [zero trust](https://www.hashicorp.com/solutions/zero-trust-security) security.
-
-Consul's service discovery capabilities help you discover, track, and monitor the health of services within a network. Consul acts as a single source of truth that allows your services to query and communicate with each other.
-
-You can use Consul with virtual machines (VMs), containers, serverless technologies, or with container orchestration platforms, such as [Nomad](https://www.nomadproject.io/) and Kubernetes. Consul is platform agnostic which makes it a great fit for all environments, including legacy platforms.
-
-Consul is available as a [self-managed](/consul/downloads) project or as a fully managed service mesh solution ([HCP Consul Dedicated](https://portal.cloud.hashicorp.com/sign-in?utm_source=consul_docs)). HCP Consul Dedicated enables users to discover and securely connect services without the added operational burden of maintaining a service mesh on their own.
-
-## Next steps
-
-Get started with service discovery today by leveraging Consul on HCP, Consul on Kubernetes, or Consul on VMs. Prepare your organization for the future of multi-cloud and embrace a [zero-trust](https://www.hashicorp.com/solutions/zero-trust-security) architecture.
-
-Feel free to get started with Consul by exploring one of these Consul tutorials:
-
-- [Get Started with Consul on VMs](/consul/tutorials/get-started-vms)
-- [Get Started with Consul on HCP](/consul/tutorials/get-started-hcp)
-- [Get Started with Consul on Kubernetes](/consul/tutorials/get-started-kubernetes)
diff --git a/website/content/docs/concepts/service-mesh.mdx b/website/content/docs/concepts/service-mesh.mdx
deleted file mode 100644
index 33ebf1478d83..000000000000
--- a/website/content/docs/concepts/service-mesh.mdx
+++ /dev/null
@@ -1,118 +0,0 @@
----
-layout: docs
-page_title: Service Mesh Explained
-description: >-
- Service mesh is a dedicated network layer for secure, resilient, observable microservice communication. Learn about using Consul's service mesh to solve service networking challenges in application architectures and manage complexity in multi-cloud, hybrid cloud, and multi-platform environments.
----
-
-# What is a service mesh?
-
-A _service mesh_ is a dedicated network layer that provides secure service-to-service communication within and across infrastructure, including on-premises and cloud environments.
-Service meshes are often used with a microservice architectural pattern, but can provide value in any scenario where complex networking is involved.
-
-## Benefits of a service mesh
-
-A service mesh provides benefits for all organizations, ranging from security to improved application resiliency.
-Some of the benefits of a service mesh include:
-
-- service discovery
-- application health monitoring
-- load balancing
-- automatic failover
-- traffic management
-- encryption
-- observability and traceability
-- authentication and authorization
-- network automation
-
-A common use case for leveraging a service mesh is to achieve a [_zero trust_ model](https://www.consul.io/use-cases/zero-trust-networking).
-In a zero trust model, applications require identity-based access to ensure all communication within the service mesh is authenticated with TLS certificates and encrypted in transit.
-
-In traditional security strategies, protection is primarily focused at the perimeter of a network.
-In cloud environments, the surface area for network access is much wider than the traditional on-premises networks.
-In addition, traditional security practices overlook the fact that many bad actors can originate from within the network walls.
-A zero trust model addresses these concerns while allowing organizations to scale as needed.
-
-## How does a service mesh work?
-
-A service mesh typically consists of a control plane and a data plane. The control plane maintains a central registry that keeps track of all services and their respective IP addresses. This activity is called [service discovery](https://www.hashicorp.com/products/consul/service-discovery-and-health-checking).
-As long as the application is registered with the control plane, the control plane will be able to share with other members of the mesh how to communicate with the application and enforce rules for who can communicate with each other.
-
-The control plane is responsible for securing the mesh, facilitating service discovery, health checking, policy enforcement, and other similar operational concerns.
-
-The data plane handles communication between services.
-Many service mesh solutions employ a sidecar proxy to handle data plane communications, and thus limit the level of awareness the services need to have about the network environment.
-
-
-
-## API gateway vs service mesh
-
-An API gateway is a centralized access point for handling incoming client requests and delivering them to services.
-The API gateway acts as a control plane that allows operators and developers to manage incoming client requests and apply different handling logic depending on the request.
-The API gateway will route the incoming requests to the respective service. The primary function of an API gateway is to handle requests and return the reply from the service back to the client.
-
-A service mesh specializes in the network management of services and the communication between services.
-The mesh is responsible for keeping track of services and their health status, IP address, and traffic routing and ensuring all traffic between services is authenticated and encrypted.
-Unlike some API gateways, a service mesh will track all registered services' lifecycle and ensure requests are routed to healthy instances of the service.
-API gateways are frequently deployed alongside a load balancer to ensure traffic is directed to healthy and available instances of the service.
-The mesh reduces the load balancer footprint as routing responsibilities are handled in a decentralized manner.
-
-API gateways can be used with a service mesh to bridge external networks (non-mesh) with a service mesh.
-
--> **API gateways and traffic direction:** API gateways are often used to accept north-south traffic. North-south traffic is networking traffic that either enters or exits a datacenter or a virtual private network (VPC). You can connect API gateways to a service mesh and provide access to it from outside the mesh.
-A service mesh is primarily used for handling east-west traffic. East-west traffic traditionally remains inside a data center or a VPC.
-A service mesh can be connected to another service mesh in another data center or VPC to form a federated mesh.
-
-## What problems does a service mesh solve?
-
-Modern infrastructure is transitioning from being primarily static to dynamic in nature (ephemeral).
-This dynamic infrastructure has a short life cycle, meaning virtual machines (VM) and containers are frequently recycled.
-It's difficult for an organization to manage and keep track of application services that live on short-lived resources. A service mesh solves this problem by acting as a central registry of all registered services.
-As instances of a service (e.g., VM, container, serverless functions) come up and down, the mesh is aware of their state and availability. The ability to conduct _service discovery_ is the foundation to the other problems a service mesh solves.
-
-As a service mesh is aware of the state of a service and its instances, the mesh can implement more intelligent and dynamic network routing.
-Many service meshes offer L7 traffic management capabilities. As a result, operators and developers can create powerful rules to direct network traffic as needed, such as load balancing, traffic splitting, dynamic failover, and custom resolvers.
-A service mesh's dynamic network behavior allows application owners to improve application resiliency and availability with no application changes.
-
-Implementing dynamic network behavior is critical as more and more applications are deployed across different cloud providers (multi-cloud) and private data centers.
-Organizations may need to route network traffic to other infrastructure environments. Ensuring this traffic is secure is on top of mind for all organizations.
-Service meshes offer the ability to enforce network traffic encryption (mTLS) and authentication between all services. The service mesh can automatically generate an SSL certificate for each service and its instances.
-The certificate authenticates with other services inside the mesh and encrypts the TCP/UDP/gRPC connection with SSL.
-
-Fine-grained policies that dictate which services are allowed to communicate with each other are another benefit of a service mesh.
-Traditionally, services are permitted to communicate with other services through firewall rules.
-The traditional firewall (IP-based) model is difficult to enforce with dynamic infrastructure resources with a short lifecycle and frequently recycling IP addresses.
-As a result, network administrators have to open up network ranges to permit network traffic between services without differentiating the services generating the network traffic. However, a service mesh allows operators and developers to shift away from an IP-based model and focus more on service to service permissions.
-An operator defines a policy that only allows _service A_ to communicate with _service B_. Otherwise, the default action is to deny the traffic.
-This shift from an IP address-based security model to a service-focused model reduces the overhead of securing network traffic and allows an organization to take advantage of multi-cloud environments without sacrificing security due to complexity.
-
-## How do you implement a service mesh?
-
-Service meshes are commonly installed in Kubernetes clusters. There are also platform-agnostic service meshes available for non-Kubernetes-based workloads.
-For Kubernetes, most service meshes can be installed by operators through a [Helm chart](https://helm.sh/). Additionally, the service mesh may offer a CLI tool that supports the installation and maintenance of the service mesh.
-Non-Kubernetes based service meshes can be installed through infrastructure as code (IaC) products such as [Terraform](https://www.terraform.io/), CloudFormation, ARM Templates, Puppet, Chef, etc.
-
-## What is a multi-platform service mesh?
-
-A multi-platform service mesh is capable of supporting various infrastructure environments.
-This can range from having the service mesh support Kubernetes and non-Kubernetes workloads, to having a service mesh span across various cloud environments (multi-cloud and hybrid cloud).
-
-## What is Consul?
-
-Consul is a multi-networking tool that offers a fully-featured service mesh solution that solves the networking and security challenges of operating microservices and cloud infrastructure (multi-cloud and hybrid cloud).
-Consul offers a software-driven approach to routing and segmentation. It also brings additional benefits such as failure handling, retries, and network observability.
-Each of these features can be used individually as needed or they can be used together to build a full service mesh and achieve [zero trust](https://www.hashicorp.com/solutions/zero-trust-security) security.
-In simple terms, Consul is the control plane of the service mesh. The data plane is supported by Consul through its first class support of [Envoy](https://www.envoyproxy.io/) as a proxy.
-
-You can use Consul with virtual machines (VMs), containers, or with container orchestration platforms, such as [Nomad](https://www.nomadproject.io/) and Kubernetes.
-Consul is platform agnostic which makes it a great fit for all environments, including legacy platforms.
-
-Consul is available as a [self-install](/consul/downloads) project or as a fully managed service mesh solution called [HCP Consul Dedicated](https://portal.cloud.hashicorp.com/sign-in?utm_source=consul_docs).
-HCP Consul Dedicated enables users to discover and securely connect services without the added operational burden of maintaining a service mesh on their own.
-
-You can learn more about Consul by visiting the Consul [tutorials](/consul/tutorials).
-
-## Next
-
-Get started today with a service mesh by leveraging [HCP Consul Dedicated](https://portal.cloud.hashicorp.com/sign-in?utm_source=consul_docs).
-Prepare your organization for the future of multi-cloud and embrace a [zero-trust](https://www.hashicorp.com/solutions/zero-trust-security) architecture.
diff --git a/website/content/docs/connect/ca/aws.mdx b/website/content/docs/connect/ca/aws.mdx
deleted file mode 100644
index cac5cb46e650..000000000000
--- a/website/content/docs/connect/ca/aws.mdx
+++ /dev/null
@@ -1,182 +0,0 @@
----
-layout: docs
-page_title: Service Mesh Certificate Authority - AWS Certificate Manager
-description: >-
- You can use the AWS Certificate Manager Private Certificate Authority as the Consul service mesh's certificate authority to secure your service mesh. Learn how to configure the AWS ACM Private CA, its limitations in Consul, and cost planning considerations.
----
-
-# AWS Certificate Manager as a Service Mesh Certificate Authority
-
-Consul can be used with [AWS Certificate Manager (ACM) Private Certificate
-Authority
-(CA)](https://aws.amazon.com/certificate-manager/private-certificate-authority/)
-to manage and sign certificates.
-
--> This page documents the specifics of the AWS ACM Private CA provider.
-Please read the [certificate management overview](/consul/docs/connect/ca)
-page first to understand how Consul manages certificates with configurable
-CA providers.
-
-## Requirements
-
-The ACM Private CA Provider was added in Consul 1.7.0.
-
-The ACM Private CA Provider needs to be authorized via IAM credentials to
-perform operations. Every Consul server needs to be running in an environment
-where a suitable IAM configuration is present.
-
-The [standard AWS SDK credential
-locations](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials)
-are used, which means that suitable credentials and region configuration need to be present in one of the following:
-
-1. Environment variables
-1. Shared credentials file
-1. Via an EC2 instance role
-
-The IAM credential provided must have permission for the following actions:
-
-- CreateCertificateAuthority - assuming an existing CA is not specified in `existing_arn`
-- DescribeCertificateAuthority
-- GetCertificate
-- IssueCertificate
-
-## Configuration
-
-The ACM Private CA provider is enabled by setting the CA provider to
-`"aws-pca"` in the agent's [`ca_provider`] configuration option, or via the
-[`/connect/ca/configuration`] API endpoint. At this time there is only one,
-optional configuration value.
-
-Example configurations are shown below:
-
-
-
-
-
-```hcl
-# ...
-connect {
- enabled = true
- ca_provider = "aws-pca"
- ca_config {
- existing_arn = "arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-123456789012"
- }
-}
-```
-
-
-
-
-
-```json
-{
- "Provider": "aws-pca",
- "Config": {
- "ExistingARN": "arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-123456789012"
- }
-}
-```
-
-
-
-
-
-~> **Note**: Suitable AWS IAM credentials are necessary for the provider to
-work. However, these are not configured in the Consul config which is typically
-on disk, and instead rely on the [standard AWS SDK configuration
-locations](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials).
-
-The configuration options are listed below.
-
--> **Note**: The first key is the value used in API calls, and the second key
- (after the `/`) is used if you are adding the configuration to the agent's
- configuration file.
-
-- `ExistingARN` / `existing_arn` (`string: `) - The Amazon Resource
- Name (ARN) of an existing private CA in your ACM account. If specified,
- Consul will attempt to use the existing CA to issue certificates.
-
- - In the primary datacenter this ARN **must identify a root CA**. See
- [limitations](#limitations).
- - In a secondary datacenter, it must identify a subordinate CA signed by
- the same root used in the primary datacenter. If it is signed by another
- root, Consul will automatically create a new subordinate signed by the
- primary's root instead.
-
- The default behavior with no `ExistingARN` specified is for Consul to
- create a new root CA in the primary datacenter and a subordinate CA in
- each secondary DC.
-
-@include 'http_api_connect_ca_common_options.mdx'
-
-## Limitations
-
-ACM Private CA has several
-[limits](https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaLimits.html)
-that restrict how fast certificates can be issued. This may impact how quickly
-large clusters can rotate all issued certificates.
-
-Currently, the ACM Private CA provider for service mesh has some additional
-limitations described below.
-
-### Unable to Cross-sign Other CAs
-
-It's not possible to cross-sign other CA provider's root certificates during a
-migration. ACM Private CA is capable of doing that through a different workflow
-but is not able to blindly cross-sign another root certificate without a CSR
-being generated. Both Consul's built-in CA and Vault can do this and the current
-workflow for managing CAs relies on it.
-
-For now, the limitation means that once ACM Private CA is configured as the CA
-provider, it is not possible to reconfigure a different CA provider, or rotate
-the root CA key without potentially observing some transient connection
-failures. See the section on [forced rotation without
-cross-signing](/consul/docs/connect/ca#forced-rotation-without-cross-signing) for
-more details.
-
-### Primary DC Must be a Root CA
-
-Currently, if an existing ACM Private CA is used, the primary DC must use a Root
-CA directly to issue certificates.
-
-## Cost Planning
-
-To help estimate costs, an example is provided below of the resources that would
-be used.
-
-~> This is intended to illustrate the behavior of the CA for cost planning
-purposes. Please refer to the [pricing for ACM Private
-CA](https://aws.amazon.com/certificate-manager/pricing/) for actual cost
-information.
-
-Assume the following Consul datacenters exist and are configured to use ACM
-Private CA as their service mesh CA with the default leaf certificate lifetime of
-72 hours:
-
-| Datacenter | Primary | CA Resource Created | Number of service instances |
-| ---------- | ------- | ------------------- | --------------------------- |
-| dc1 | yes | 1 ROOT | 100 |
-| dc2 | no | 1 SUBORDINATE | 50 |
-| dc3 | no | 1 SUBORDINATE | 500 |
-
-Leaf certificates are valid for 72 hours but are refreshed when
-between 60% and 90% of their lifetime has elapsed. On average each certificate
-will be reissued every 54 hours or roughly 13.3 times per month.
-
-So monthly cost would be calculated as:
-
-- 3 ⨉ Monthly CA cost, plus
-- 8,645 ⨉ Certificate Issue cost, made up of:
- - 100 ⨉ 13.3 = 1,330 certificates issued in dc1
- - 50 ⨉ 13.3 = 665 certificates issued in dc2
- - 500 ⨉ 13.3 = 6,650 certificates issued in dc3
-
-The number of certificates issued could be reduced by increasing
-[`leaf_cert_ttl`](/consul/docs/agent/config/config-files#ca_leaf_cert_ttl) in the CA Provider
-configuration if the longer lived credentials are an acceptable risk tradeoff
-against the cost.
-
-
-[`ca_config`]: /consul/docs/agent/config/config-files#connect_ca_config
-[`ca_provider`]: /consul/docs/agent/config/config-files#connect_ca_provider
-[`/connect/ca/configuration`]: /consul/api-docs/connect/ca#update-ca-configuration
diff --git a/website/content/docs/connect/ca/consul.mdx b/website/content/docs/connect/ca/consul.mdx
deleted file mode 100644
index 870bea4fe434..000000000000
--- a/website/content/docs/connect/ca/consul.mdx
+++ /dev/null
@@ -1,137 +0,0 @@
----
-layout: docs
-page_title: Certificate Authority - Built-in Service Mesh CA
-description: >-
- Consul has a built-in service mesh certificate authority that can be used to secure your service mesh without needing a separate CA system. Learn how to configure the built-in service mesh CA as a root CA or an intermediate CA connected to an existing PKI system.
----
-
-# Built-In Certificate Authority for Service Mesh
-
-Consul ships with a built-in CA system so that service mesh can be
-easily enabled out of the box. The built-in CA generates and stores the
-root certificate and private key on Consul servers. It can also be
-configured with a custom certificate and private key if needed.
-
-If service mesh is enabled and no CA provider is specified, the built-in
-CA is the default provider used. The provider can be
-[updated and rotated](/consul/docs/connect/ca#root-certificate-rotation)
-at any point to migrate to a new provider.
-
--> This page documents the specifics of the built-in CA provider.
-Please read the [certificate management overview](/consul/docs/connect/ca)
-page first to understand how Consul manages certificates with configurable
-CA providers.
-
-## Configuration
-
-The built-in CA provider has no required configuration. Enabling service mesh
-alone will configure the built-in CA provider, and will automatically generate
-a root certificate and private key:
-
-
-
-```hcl
-# ...
-connect {
- enabled = true
-}
-```
-
-
-
-The configuration options are listed below.
-
--> **Note**: The first key is the value used in API calls, and the second key
-(after the `/`) is used if you are adding the configuration to the agent's
-configuration file.
-
-- `PrivateKey` / `private_key` (`string: ""`) - A PEM-encoded private key
- for signing operations. This must match the private key used for the root
- certificate if it is manually specified. If this is blank, a private key
- is automatically generated.
-
-- `RootCert` / `root_cert` (`string: ""`) - A PEM-encoded root certificate
- to use. If this is blank, a root certificate is automatically generated
- using the private key specified. If this is specified, the certificate
- must be a valid
- [SPIFFE SVID signing certificate](https://github.com/spiffe/spiffe/blob/master/standards/X509-SVID.md)
- and the URI in the SAN must match the cluster identifier created at
- bootstrap with the ".consul" TLD. The cluster identifier can be found
- using the [CA List Roots endpoint](/consul/api-docs/connect/ca#list-ca-root-certificates).
-
-@include 'http_api_connect_ca_common_options.mdx'
-
-## Specifying a Custom Private Key and Root Certificate
-
-By default, a root certificate and private key will be automatically
-generated during the cluster's bootstrap. It is possible to configure
-the Consul CA provider to use a specific private key and root certificate.
-This is particularly useful if you have an external PKI system that doesn't
-currently integrate with Consul directly.
-
-To view the current CA configuration, use the [Get CA Configuration endpoint](/consul/api-docs/connect/ca#get-ca-configuration):
-
-```shell-session
-$ curl localhost:8500/v1/connect/ca/configuration
-{
- "Provider": "consul",
- "Config": {
- "LeafCertTTL": "72h",
- "IntermediateCertTTL": "8760h"
- },
- "CreateIndex": 5,
- "ModifyIndex": 5
-}
-```
-
-This is the default service mesh CA configuration if nothing is explicitly set when
-service mesh is enabled - the PrivateKey and RootCert fields have not been set, so those have
-been generated (as seen above in the roots list).
-
-There are two ways to have the Consul CA use a custom private key and root certificate:
-either through the `ca_config` section of the [Agent configuration](/consul/docs/agent/config/config-files#connect_ca_config) (which can only be used during the cluster's
-initial bootstrap) or through the [Update CA Configuration endpoint](/consul/api-docs/connect/ca#update-ca-configuration).
-
-Currently Consul requires that root certificates are valid [SPIFFE SVID Signing certificates](https://github.com/spiffe/spiffe/blob/master/standards/X509-SVID.md) and that the URI encoded
-in the SAN is the cluster identifier created at bootstrap with the ".consul" TLD. In this
-example, we will set the URI SAN to `spiffe://36cb52cd-4058-f811-0432-6798a240c5d3.consul`.
-
-In order to use the Update CA Configuration HTTP endpoint, the private key and certificate
-must be passed via JSON:
-
-```shell-session
-$ jq --null-input --rawfile key root.key --rawfile cert root.crt '
-{
- "Provider": "consul",
- "Config": {
- "LeafCertTTL": "72h",
- "PrivateKey": $key | sub("\\n$"; ""),
- "RootCert": $cert | sub("\\n$"; ""),
- "IntermediateCertTTL": "8760h"
- }
-}' > ca_config.json
-```
-
-The resulting `ca_config.json` file can then be used to update the active root certificate:
-
-```shell-session
-$ cat ca_config.json
-{
- "Provider": "consul",
- "Config": {
- "LeafCertTTL": "72h",
- "PrivateKey": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEArqiy1c3pbT3cSkjdEM1APALUareU...",
- "RootCert": "-----BEGIN CERTIFICATE-----\nMIIDijCCAnKgAwIBAgIJAOFZ66em1qC7MA0GCSqGSIb3...",
- "IntermediateCertTTL": "8760h"
- }
-}
-
-$ curl --request PUT --data @ca_config.json localhost:8500/v1/connect/ca/configuration
-
-...
-
-[INFO] connect: CA rotated to new root under provider "consul"
-```
-
-The cluster is now using the new private key and root certificate. Updating the CA config
-this way also triggered a certificate rotation.
diff --git a/website/content/docs/connect/ca/index.mdx b/website/content/docs/connect/ca/index.mdx
deleted file mode 100644
index c49e07516fae..000000000000
--- a/website/content/docs/connect/ca/index.mdx
+++ /dev/null
@@ -1,263 +0,0 @@
----
-layout: docs
-page_title: Service Mesh Certificate Authority - Overview
-description: >-
- Consul uses a certificate authority (CA) to generate, use, manage, sign, and store certificates for your service mesh. Learn about certificate management, including configuration, root cert rotation, cross-signing, and regenerating the CA.
----
-
-# Service Mesh Certificate Authority Overview
-
-Service mesh certificate management is done centrally through the Consul
-servers using the configured service mesh CA (Certificate Authority) provider. A CA provider
-manages root and intermediate certificates and performs certificate signing
-operations. The Consul leader orchestrates CA provider operations as necessary,
-such as when a service needs a new certificate or during CA rotation events.
-
-The CA provider abstraction enables Consul to support multiple systems for
-storing and signing certificates. Consul ships with a
-[built-in CA](/consul/docs/connect/ca/consul) which generates and stores the
-root certificate and private key on the Consul servers. Consul also has
-support for using
-[Vault as a CA](/consul/docs/connect/ca/vault). With Vault, the root certificate
-and private key material remain with the Vault cluster.
-
-## CA and Certificate relationship
-
-This diagram shows the relationship between the CA certificates in a Consul primary datacenter and a
-secondary Consul datacenter.
-
-
-
-Leaf certificates are created for two purposes:
-- the Leaf Cert Service is used by envoy proxies in the mesh to perform mTLS with other
-services.
-- the Leaf Cert Client Agent is created by auto-encrypt and auto-config. It is used by
-client agents for HTTP API TLS, and for mTLS for RPC requests to servers.
-
-Any secondary datacenters use their CA provider to generate an intermediate certificate
-signing request (CSR) to be signed by the primary root CA. They receive an intermediate
-CA certificate, which is used to sign leaf certificates in the secondary datacenter.
-
-You can use different providers across primary and secondary datacenters.
-For example, an operator may use a Vault CA provider for extra security in the primary
-datacenter but choose to use the built-in CA provider in the secondary datacenter, which
-may not have a reachable Vault cluster. The following table compares the built-in and Vault providers.
-
-## CA Provider Comparison
-
-| | Consul built-in | Vault |
-|------------|------------------------------------|-----------------------------------------------------------------------------------|
-| Security | CA private keys are stored on disk | CA private keys are stored in Vault and are never exposed to Consul server agents |
-| Resiliency | No dependency on external systems. If Consul is available, it can sign certificates | Dependent on Vault availability |
-| Latency | Consul signs certificates locally | A network call to Vault is required to sign certificates |
-
-## CA Bootstrapping
-
-CA initialization happens automatically when a new Consul leader is elected
-as long as
-[service mesh is enabled](/consul/docs/connect/configuration#agent-configuration),
-and the CA system has not already been initialized. This initialization process
-will generate the initial root certificates and setup the internal Consul server
-state.
-
-For the initial bootstrap, the CA provider can be configured through the
-[Agent configuration](/consul/docs/agent/config/config-files#connect_ca_config). After
-initialization, the CA can only be updated through the
-[Update CA Configuration API endpoint](/consul/api-docs/connect/ca#update-ca-configuration).
-If a CA is already initialized, any changes to the CA configuration in the
-agent configuration file (including removing the configuration completely)
-will have no effect.
-
-If no specific provider is configured when service mesh is enabled, the built-in
-Consul CA provider will be used and a private key and root certificate will
-be generated automatically.
-
-## Viewing Root Certificates
-
-Root certificates can be queried with the
-[list CA Roots endpoint](/consul/api-docs/connect/ca#list-ca-root-certificates).
-With this endpoint, you can see the list of currently trusted root certificates.
-When a cluster first initializes, this will only list one trusted root. Multiple
-roots may appear as part of
-[rotation](#root-certificate-rotation).
-
-```shell-session
-$ curl http://localhost:8500/v1/connect/ca/roots
-{
- "ActiveRootID": "31:6c:06:fb:49:94:42:d5:e4:55:cc:2e:27:b3:b2:2e:96:67:3e:7e",
- "TrustDomain": "36cb52cd-4058-f811-0432-6798a240c5d3.consul",
- "Roots": [
- {
- "ID": "31:6c:06:fb:49:94:42:d5:e4:55:cc:2e:27:b3:b2:2e:96:67:3e:7e",
- "Name": "Consul CA Root Cert",
- "SerialNumber": 7,
- "SigningKeyID": "19:45:8b:30:a1:45:84:ae:23:52:db:8d:1b:ff:a9:09:db:fc:2a:72:39:ae:da:11:53:f4:37:5c:de:d1:68:d8",
- "ExternalTrustDomain": "a1499528-fbf6-df7b-05e5-ae81e1873fc4",
- "NotBefore": "2018-06-06T17:35:25Z",
- "NotAfter": "2028-06-03T17:35:25Z",
- "RootCert": "-----BEGIN CERTIFICATE-----\nMIICmDCCAj6gAwIBAgIBBzAKBggqhkjOPQQDAjAWMRQwEgYDVQQDEwtDb25zdWwg\nQ0EgNzAeFw0xODA2MDYxNzM1MjVaFw0yODA2MDMxNzM1MjVaMBYxFDASBgNVBAMT\nC0NvbnN1bCBDQSA3MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEgo09lpx63bHw\ncSXeeoSpHpHgyzX1Q8ewJ3RUg6Ie8Howbs/QBz1y/kGxsF35HXij3YrqhgQyPPx4\nbQ8FH2YR4aOCAXswggF3MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/\nMGgGA1UdDgRhBF8xOTo0NTo4YjozMDphMTo0NTo4NDphZToyMzo1MjpkYjo4ZDox\nYjpmZjphOTowOTpkYjpmYzoyYTo3MjozOTphZTpkYToxMTo1MzpmNDozNzo1Yzpk\nZTpkMTo2ODpkODBqBgNVHSMEYzBhgF8xOTo0NTo4YjozMDphMTo0NTo4NDphZToy\nMzo1MjpkYjo4ZDoxYjpmZjphOTowOTpkYjpmYzoyYTo3MjozOTphZTpkYToxMTo1\nMzpmNDozNzo1YzpkZTpkMTo2ODpkODA/BgNVHREEODA2hjRzcGlmZmU6Ly8zNmNi\nNTJjZC00MDU4LWY4MTEtMDQzMi02Nzk4YTI0MGM1ZDMuY29uc3VsMD0GA1UdHgEB\n/wQzMDGgLzAtgiszNmNiNTJjZC00MDU4LWY4MTEtMDQzMi02Nzk4YTI0MGM1ZDMu\nY29uc3VsMAoGCCqGSM49BAMCA0gAMEUCIHl6UDdouw8Fzn/oDHputAxt3UFbVg/U\nvC6jWPuqqMwmAiEAkvMadtwjtNU7m/AQRJrj1LeG3eXw7dWO8SlI2fEs0yY=\n-----END CERTIFICATE-----\n",
- "IntermediateCerts": null,
- "Active": true,
- "PrivateKeyType": "",
- "PrivateKeyBits": 0,
- "CreateIndex": 8,
- "ModifyIndex": 8
- }
- ]
-}
-```
-
-## CA Configuration
-
-After initialization, the CA provider configuration can be viewed with the
-[Get CA Configuration API endpoint](/consul/api-docs/connect/ca#get-ca-configuration).
-Consul will filter sensitive values from this endpoint depending on the
-provider in use, so the configuration may not be complete.
-
-```shell-session
-$ curl http://localhost:8500/v1/connect/ca/configuration
-{
- "Provider": "consul",
- "Config": {
- "LeafCertTTL": "72h",
- "IntermediateCertTTL": "8760h"
- },
- "CreateIndex": 5,
- "ModifyIndex": 5
-}
-```
-
-The CA provider can be reconfigured using the
-[Update CA Configuration API endpoint](/consul/api-docs/connect/ca#update-ca-configuration).
-Specific options for reconfiguration can be found in the specific
-CA provider documentation in the sidebar to the left.
-
-## Root Certificate Rotation
-
-Whenever the CA's configuration is updated in a way that causes the root key to
-change, a special rotation process will be triggered in order to smoothly
-transition to the new certificate. This rotation is automatically orchestrated
-by Consul.
-
-~> If the current CA Provider doesn't support cross-signing, this process can't
-be followed. See [Forced Rotation Without
-Cross-Signing](#forced-rotation-without-cross-signing).
-
-This also automatically occurs when a completely different CA provider is
-configured (since this changes the root key). Therefore, this automatic rotation
-process can also be used to cleanly transition between CA providers. For example,
-updating the service mesh to use Vault instead of the built-in CA.
-
-During rotation, an intermediate CA certificate is requested from the new root,
-which is then cross-signed by the old root. This cross-signed certificate is
-then distributed alongside any newly-generated leaf certificates used by the
-proxies once the new root becomes active, and provides a chain of trust back to
-the old root certificate in the event that a certificate signed by the new root
-is presented to a proxy that has not yet updated its bundle of trusted root CA
-certificates to include the new root.
-
-After the cross-signed certificate has been successfully generated and the new root
-certificate or CA provider has been set up, the new root becomes the active one
-and is immediately used for signing any new incoming certificate requests.
-
-If we check the [list CA roots
-endpoint](/consul/api-docs/connect/ca#list-ca-root-certificates) after updating the
-configuration with a new root certificate, we can see both the old and new root
-certificates are present, and the currently active root has an intermediate
-certificate which has been generated and cross-signed automatically by the old
-root during the rotation process:
-
-```shell-session
-$ curl localhost:8500/v1/connect/ca/roots
-{
- "ActiveRootID": "d2:2c:41:94:1e:50:04:ea:86:fc:08:d6:b0:45:a4:af:8a:eb:76:a0",
- "TrustDomain": "36cb52cd-4058-f811-0432-6798a240c5d3.consul",
- "Roots": [
- {
- "ID": "31:6c:06:fb:49:94:42:d5:e4:55:cc:2e:27:b3:b2:2e:96:67:3e:7e",
- "Name": "Consul CA Root Cert",
- "SerialNumber": 7,
- "SigningKeyID": "19:45:8b:30:a1:45:84:ae:23:52:db:8d:1b:ff:a9:09:db:fc:2a:72:39:ae:da:11:53:f4:37:5c:de:d1:68:d8",
- "ExternalTrustDomain": "a1499528-fbf6-df7b-05e5-ae81e1873fc4",
- "NotBefore": "2018-06-06T17:35:25Z",
- "NotAfter": "2028-06-03T17:35:25Z",
- "RootCert": "-----BEGIN CERTIFICATE-----\nMIICmDCCAj6gAwIBAgIBBzAKBggqhkjOPQQDAjAWMRQwEgYDVQQDEwtDb25zdWwg\nQ0EgNzAeFw0xODA2MDYxNzM1MjVaFw0yODA2MDMxNzM1MjVaMBYxFDASBgNVBAMT\nC0NvbnN1bCBDQSA3MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEgo09lpx63bHw\ncSXeeoSpHpHgyzX1Q8ewJ3RUg6Ie8Howbs/QBz1y/kGxsF35HXij3YrqhgQyPPx4\nbQ8FH2YR4aOCAXswggF3MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/\nMGgGA1UdDgRhBF8xOTo0NTo4YjozMDphMTo0NTo4NDphZToyMzo1MjpkYjo4ZDox\nYjpmZjphOTowOTpkYjpmYzoyYTo3MjozOTphZTpkYToxMTo1MzpmNDozNzo1Yzpk\nZTpkMTo2ODpkODBqBgNVHSMEYzBhgF8xOTo0NTo4YjozMDphMTo0NTo4NDphZToy\nMzo1MjpkYjo4ZDoxYjpmZjphOTowOTpkYjpmYzoyYTo3MjozOTphZTpkYToxMTo1\nMzpmNDozNzo1YzpkZTpkMTo2ODpkODA/BgNVHREEODA2hjRzcGlmZmU6Ly8zNmNi\nNTJjZC00MDU4LWY4MTEtMDQzMi02Nzk4YTI0MGM1ZDMuY29uc3VsMD0GA1UdHgEB\n/wQzMDGgLzAtgiszNmNiNTJjZC00MDU4LWY4MTEtMDQzMi02Nzk4YTI0MGM1ZDMu\nY29uc3VsMAoGCCqGSM49BAMCA0gAMEUCIHl6UDdouw8Fzn/oDHputAxt3UFbVg/U\nvC6jWPuqqMwmAiEAkvMadtwjtNU7m/AQRJrj1LeG3eXw7dWO8SlI2fEs0yY=\n-----END CERTIFICATE-----\n",
- "IntermediateCerts": null,
- "Active": false,
- "PrivateKeyType": "",
- "PrivateKeyBits": 0,
- "CreateIndex": 8,
- "ModifyIndex": 24
- },
- {
- "ID": "d2:2c:41:94:1e:50:04:ea:86:fc:08:d6:b0:45:a4:af:8a:eb:76:a0",
- "Name": "Consul CA Root Cert",
- "SerialNumber": 16238269036752183483,
- "SigningKeyID": "",
- "ExternalTrustDomain": "a1499528-fbf6-df7b-05e5-ae81e1873fc4",
- "NotBefore": "2018-06-06T17:37:03Z",
- "NotAfter": "2028-06-03T17:37:03Z",
- "RootCert": "-----BEGIN CERTIFICATE-----\nMIIDijCCAnKgAwIBAgIJAOFZ66em1qC7MA0GCSqGSIb3DQEBCwUAMGIxCzAJBgNV\nBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNp\nc2NvMRIwEAYDVQQKDAlIYXNoaUNvcnAxEjAQBgNVBAMMCWxvY2FsaG9zdDAeFw0x\nODA2MDYxNzM3MDNaFw0yODA2MDMxNzM3MDNaMGIxCzAJBgNVBAYTAlVTMRMwEQYD\nVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMRIwEAYDVQQK\nDAlIYXNoaUNvcnAxEjAQBgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEB\nBQADggEPADCCAQoCggEBAK6ostXN6W093EpI3RDNQDwC1Gq3lPNoodL5XRaVVIBU\n3X5iC+Ttk02p67cHUguh4ZrWr3o3Dzxm+gKK0lfZLW0nNYNPAIGZWQD9zVSx1Lqt\n8X0pd+fhMV5coQrh3YIG/vy17IBTSBuRUX0mXOKjOeJJlrw1HQZ8pfm7WX6LFul2\nXszvgn5K1XR+9nhPy6K2bv99qsY0sm7AqCS2BjYBW8QmNngJOdLPdhyFh7invyXe\nPqgujc/KoA3P6e3/G7bJZ9+qoQMK8uwD7PxtA2hdQ9t0JGPsyWgzhwfBxWdBWRzV\nRvVi6Yu2tvw3QrjdeKQ5Ouw9FUb46VnTU7jTO974HjkCAwEAAaNDMEEwPwYDVR0R\nBDgwNoY0c3BpZmZlOi8vMzZjYjUyY2QtNDA1OC1mODExLTA0MzItNjc5OGEyNDBj\nNWQzLmNvbnN1bDANBgkqhkiG9w0BAQsFAAOCAQEATHgCro9VXj7JbH/tlB6f/KWf\n7r98+rlUE684ZRW9XcA9uUA6y265VPnemsC/EykPsririoh8My1jVPuEfgMksR39\n9eMDJKfutvSpLD1uQqZE8hu/hcYyrmQTFKjW71CfGIl/FKiAg7wXEw2ljLN9bxNv\nGG118wrJyMZrRvFjC2QKY025QQSJ6joNLFMpftsZrJlELtRV+nx3gMabpiDRXhIw\nJM6ti26P1PyVgGRPCOG10v+OuUtwe0IZoOqWpPJN8jzSuqZWf99uolkG0xuqLNz6\nd8qvTp1YF9tTmysgvdeGALez/02HTF035RVTsQfH9tM/+4yG1UnmjLpz3p4Fow==\n-----END CERTIFICATE-----",
- "IntermediateCerts": [
- "-----BEGIN CERTIFICATE-----\nMIIDTzCCAvWgAwIBAgIBFzAKBggqhkjOPQQDAjAWMRQwEgYDVQQDEwtDb25zdWwg\nQ0EgNzAeFw0xODA2MDYxNzM3MDNaFw0yODA2MDMxNzM3MDNaMGIxCzAJBgNVBAYT\nAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2Nv\nMRIwEAYDVQQKDAlIYXNoaUNvcnAxEjAQBgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJ\nKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK6ostXN6W093EpI3RDNQDwC1Gq3lPNo\nodL5XRaVVIBU3X5iC+Ttk02p67cHUguh4ZrWr3o3Dzxm+gKK0lfZLW0nNYNPAIGZ\nWQD9zVSx1Lqt8X0pd+fhMV5coQrh3YIG/vy17IBTSBuRUX0mXOKjOeJJlrw1HQZ8\npfm7WX6LFul2Xszvgn5K1XR+9nhPy6K2bv99qsY0sm7AqCS2BjYBW8QmNngJOdLP\ndhyFh7invyXePqgujc/KoA3P6e3/G7bJZ9+qoQMK8uwD7PxtA2hdQ9t0JGPsyWgz\nhwfBxWdBWRzVRvVi6Yu2tvw3QrjdeKQ5Ouw9FUb46VnTU7jTO974HjkCAwEAAaOC\nARswggEXMGgGA1UdDgRhBF8xOTo0NTo4YjozMDphMTo0NTo4NDphZToyMzo1Mjpk\nYjo4ZDoxYjpmZjphOTowOTpkYjpmYzoyYTo3MjozOTphZTpkYToxMTo1MzpmNDoz\nNzo1YzpkZTpkMTo2ODpkODBqBgNVHSMEYzBhgF8xOTo0NTo4YjozMDphMTo0NTo4\nNDphZToyMzo1MjpkYjo4ZDoxYjpmZjphOTowOTpkYjpmYzoyYTo3MjozOTphZTpk\nYToxMTo1MzpmNDozNzo1YzpkZTpkMTo2ODpkODA/BgNVHREEODA2hjRzcGlmZmU6\nLy8zNmNiNTJjZC00MDU4LWY4MTEtMDQzMi02Nzk4YTI0MGM1ZDMuY29uc3VsMAoG\nCCqGSM49BAMCA0gAMEUCIBp46tRDot7GFyDXu7egq7lXBvn+UUHD5MmlFvdWmtnm\nAiEAwKBzEMcLd5kCBgFHNGyksRAMh/AGdEW859aL6z0u4gM=\n-----END CERTIFICATE-----\n"
- ],
- "Active": true,
- "PrivateKeyType": "",
- "PrivateKeyBits": 0,
- "CreateIndex": 24,
- "ModifyIndex": 24
- }
- ]
-}
-```
-
-The old root certificate will be automatically removed once enough time has elapsed
-for any leaf certificates signed by it to expire.
-
-### Forced Rotation Without Cross-Signing
-
-If the CA provider that is currently in use does not support cross-signing, then
-attempts to change the root key or CA provider will fail. This is to ensure
-operators don't make the change without understanding that there is additional
-risk involved.
-
-It is possible to force the change to happen anyway by setting the
-`ForceWithoutCrossSigning` field in the CA configuration to `true`.
-
-The downside is that all new certificates will immediately start being signed
-with the new root key, but it will take some time for agents throughout the
-cluster to observe the root CA change and reconfigure applications and proxies
-to accept certificates signed by this new root. This will mean connections made
-with a new certificate may fail for a short period after the CA change.
-
-Typically all connected agents will have observed the new roots within seconds
-even in a large deployment so the impact should be contained. But it is possible
-for a disconnected, overloaded or misconfigured agent to not see the new root
-for an unbounded amount of time during which new connections to services on that
-host will fail. The issue will resolve as soon as the agent can reconnect to
-servers.
-
-Currently both Consul and Vault CA providers _do_ support cross-signing. As more
-providers are added this documentation will list any that this section applies
-to.
-
-### Recovering From Expired Certificates
-If the built-in CA provider is misconfigured or unavailable, Consul service mesh requests eventually
-stop functioning due to expiration of intermediate and root certificates. To recover manually, use the
-[CLI helper](/consul/commands/tls/ca#consul-tls-ca-create) to generate CA certificates.
-
-
-#### Example - Regenerating the built-in CA
-```shell-session
-$ consul tls ca create -cluster-id test -common-name "Consul Agent CA" -days=365 -domain consul
- ==> Saved consul-agent-ca.pem
- ==> Saved consul-agent-ca-key.pem
-```
-The example above generates a new CA with a validity of 365 days. The cluster-id argument is specific
-to each cluster and can be looked up by examining the `TrustDomain` field in
-the [List CA Roots](/consul/api-docs/connect/ca#list-ca-root-certificates) endpoint.
-
-The contents of the generated cert and private key files from the above step should then be used with
-the [Update CA Configuration](/consul/api-docs/connect/ca#update-ca-configuration) endpoint. Once the CA configuration is
-updated on the primary datacenter, all secondary datacenters will pick up the changes and regenerate their intermediate
-and leaf certificates, after which any new requests that require certificate verification will succeed.
diff --git a/website/content/docs/connect/cluster-peering/index.mdx b/website/content/docs/connect/cluster-peering/index.mdx
deleted file mode 100644
index 83cc4b97e4c4..000000000000
--- a/website/content/docs/connect/cluster-peering/index.mdx
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: docs
-page_title: Cluster Peering Overview
-description: >-
- Cluster peering establishes communication between independent clusters in Consul, allowing services to interact across datacenters. Learn how cluster peering works, its differences with WAN federation for multi-datacenter deployments, and how to troubleshoot common issues.
----
-
-# Cluster peering overview
-
-This topic provides an overview of cluster peering, which lets you connect two or more independent Consul clusters so that services deployed to different partitions or datacenters can communicate.
-Cluster peering is enabled in Consul by default. For specific information about cluster peering configuration and usage, refer to the following pages.
-
-## What is cluster peering?
-
-Consul supports cluster peering connections between two [admin partitions](/consul/docs/enterprise/admin-partitions) _in different datacenters_. Deployments without an Enterprise license can still use cluster peering because every datacenter automatically includes a default partition. Meanwhile, admin partitions _in the same datacenter_ do not require cluster peering connections because you can export services between them without generating or exchanging a peering token.
-
-The following diagram describes Consul's cluster peering architecture.
-
-
-
-In this diagram, the `default` partition in Consul DC 1 has a cluster peering connection with the `web` partition in Consul DC 2. Enforced by their respective mesh gateways, this cluster peering connection enables `Service B` to communicate with `Service C` as a service upstream.
-
-Cluster peering leverages several components of Consul's architecture to enforce secure communication between services:
-
-- A _peering token_ contains an embedded secret that securely establishes communication when shared symmetrically between datacenters. Sharing this token enables each datacenter's server agents to recognize requests from authorized peers, similar to how the [gossip encryption key secures agent LAN gossip](/consul/docs/security/encryption#gossip-encryption).
-- A _mesh gateway_ encrypts outgoing traffic, decrypts incoming traffic, and directs traffic to healthy services. Consul's service mesh features must be enabled in order to use mesh gateways. Mesh gateways support the specific admin partitions they are deployed on. Refer to [Mesh gateways](/consul/docs/connect/gateways/mesh-gateway) for more information.
-- An _exported service_ communicates with downstreams deployed in other admin partitions. They are explicitly defined in an [`exported-services` configuration entry](/consul/docs/connect/config-entries/exported-services).
-- A _service intention_ secures [service-to-service communication in a service mesh](/consul/docs/connect/intentions). Intentions enable identity-based access between services by exchanging TLS certificates, which the service's sidecar proxy verifies upon each request.
-
-### Compared with WAN federation
-
-WAN federation and cluster peering are different ways to connect services through mesh gateways so that they can communicate across datacenters. WAN federation connects multiple datacenters to make them function as if they were a single cluster, while cluster peering treats each datacenter as a separate cluster. As a result, WAN federation requires a primary datacenter to maintain and replicate global states such as ACLs and configuration entries, but cluster peering does not.
-
-WAN federation and cluster peering also treat encrypted traffic differently. While mesh gateways between WAN federated datacenters use mTLS to keep data encrypted, mesh gateways between peers terminate mTLS sessions, decrypt data to HTTP services, and then re-encrypt traffic to send to services. Data must be decrypted in order to evaluate and apply dynamic routing rules at the destination cluster, which reduces coupling between peers.
-
-Regardless of whether you connect your clusters through WAN federation or cluster peering, human and machine users can use either method to discover services in other clusters or dial them through the service mesh.
-
-| | WAN Federation | Cluster Peering |
-| :------------------------------------------------- | :------------: | :-------------: |
-| Connects clusters across datacenters | ✅ | ✅ |
-| Shares support queries and service endpoints | ✅ | ✅ |
-| Connects clusters owned by different operators | ❌ | ✅ |
-| Functions without declaring primary datacenter | ❌ | ✅ |
-| Can use sameness groups for identical services | ❌ | ✅ |
-| Replicates exported services for service discovery | ❌ | ✅ |
-| Gossip protocol: Requires LAN gossip only | ❌ | ✅ |
-| Forwards service requests for service discovery | ✅ | ❌ |
-| Can replicate ACL tokens, policies, and roles | ✅ | ❌ |
-
-## Guidance
-
-The following resources are available to help you use Consul's cluster peering features.
-
-### Tutorials
-
-- To learn how to peer clusters and connect services across peers in AWS Elastic Kubernetes Service (EKS) and Google Kubernetes Engine (GKE) environments, complete the [Connect services between Consul datacenters with cluster peering tutorial](/consul/tutorials/developer-mesh/cluster-peering).
-
-### Usage documentation
-
-- [Establish cluster peering connections](/consul/docs/connect/cluster-peering/usage/establish-cluster-peering)
-- [Manage cluster peering connections](/consul/docs/connect/cluster-peering/usage/manage-connections)
-- [Manage L7 traffic with cluster peering](/consul/docs/connect/cluster-peering/usage/peering-traffic-management)
-- [Create sameness groups](/consul/docs/connect/cluster-peering/usage/create-sameness-groups)
-
-### Kubernetes documentation
-
-- [Cluster peering on Kubernetes technical specifications](/consul/docs/k8s/connect/cluster-peering/tech-specs)
-- [Establish cluster peering connections on Kubernetes](/consul/docs/k8s/connect/cluster-peering/usage/establish-peering)
-- [Manage cluster peering connections on Kubernetes](/consul/docs/k8s/connect/cluster-peering/usage/manage-peering)
-- [Manage L7 traffic with cluster peering on Kubernetes](/consul/docs/k8s/connect/cluster-peering/usage/l7-traffic)
-- [Create sameness groups on Kubernetes](/consul/docs/k8s/connect/cluster-peering/usage/create-sameness-groups)
-
-### Reference documentation
-
-- [Cluster peering technical specifications](/consul/docs/connect/cluster-peering/tech-specs)
-- [HTTP API reference: `/peering/` endpoint](/consul/api-docs/peering)
-- [CLI reference: `peering` command](/consul/commands/peering)
-
-## Basic troubleshooting
-
-If you experience errors when using Consul's cluster peering features, refer to the following list of technical constraints.
-
-- Peer names can only contain lowercase characters.
-- Services with node, instance, and check definitions totaling more than 8MB cannot be exported to a peer.
-- Two admin partitions in the same datacenter cannot be peered. Use the [`exported-services` configuration entry](/consul/docs/connect/config-entries/exported-services#exporting-services-to-peered-clusters) instead.
-- To manage intentions that specify services in peered clusters, use [configuration entries](/consul/docs/connect/config-entries/service-intentions). The `consul intention` CLI command is not supported.
-- The Consul UI does not support exporting services between clusters or creating service intentions. Use either the API or the CLI to complete these required steps when establishing new cluster peering connections.
-- Accessing key/value stores across peers is not supported.
diff --git a/website/content/docs/connect/cluster-peering/tech-specs.mdx b/website/content/docs/connect/cluster-peering/tech-specs.mdx
deleted file mode 100644
index 36c7dc9d9130..000000000000
--- a/website/content/docs/connect/cluster-peering/tech-specs.mdx
+++ /dev/null
@@ -1,84 +0,0 @@
----
-layout: docs
-page_title: Cluster Peering Technical Specifications
-description: >-
- Cluster peering connections in Consul interact with mesh gateways, sidecar proxies, exported services, and ACLs. Learn about the configuration requirements for these components.
----
-
-# Cluster peering technical specifications
-
-This reference topic describes the technical specifications associated with using cluster peering in your deployments. These specifications include required Consul components and their configurations. To learn more about Consul's cluster peering feature, refer to [cluster peering overview](/consul/docs/connect/cluster-peering).
-
-For cluster peering requirements in Kubernetes deployments, refer to [cluster peering on Kubernetes technical specifications](/consul/docs/k8s/connect/cluster-peering/tech-specs).
-
-## Requirements
-
-Consul's default configuration supports cluster peering connections directly between clusters. In production environments, we recommend using mesh gateways to securely route service mesh traffic between partitions with cluster peering connections.
-
-In addition, make sure your Consul environment meets the following prerequisites:
-
-- Consul v1.14 or higher.
-- Use [Envoy proxies](/consul/docs/connect/proxies/envoy). Envoy is the only proxy with mesh gateway capabilities in Consul.
-- A local Consul agent is required to manage mesh gateway configurations.
-
-## Mesh gateway specifications
-
-To change Consul's default configuration and enable cluster peering through mesh gateways, use a mesh configuration entry to update your network's service mesh proxies globally:
-
-1. In a `mesh` configuration entry, set `PeerThroughMeshGateways` to `true`:
-
-
-
- ```hcl
- Kind = "mesh"
- Peering {
- PeerThroughMeshGateways = true
- }
- ```
-
-
-
-1. Write the configuration entry to Consul:
-
- ```shell
- $ consul config write mesh-config.hcl
- ```
-
-When cluster peering through mesh gateways, consider the following deployment requirements:
-
-- A cluster requires a registered mesh gateway in order to export services to peers in other regions or cloud providers.
-- The mesh gateway must also be registered in the same admin partition as the exported services and their `exported-services` configuration entry. An enterprise license is required to use multiple admin partitions with a single cluster of Consul servers.
-- To use the `local` mesh gateway mode, you must register a mesh gateway in the importing cluster.
-- Define the `Proxy.Config` settings using opaque parameters compatible with your proxy. Refer to the [Gateway options](/consul/docs/connect/proxies/envoy#gateway-options) and [Escape-hatch Overrides](/consul/docs/connect/proxies/envoy#escape-hatch-overrides) documentation for additional Envoy proxy configuration information.
-
-### Mesh gateway modes
-
-By default, cluster peering connections use mesh gateways in [remote mode](/consul/docs/connect/gateways/mesh-gateway/service-to-service-traffic-wan-datacenters#remote). Be aware of these additional requirements when changing a mesh gateway's mode.
-
-- For mesh gateways that connect peered clusters, you can set the `mode` as either `remote` or `local`.
-- The `none` mode is invalid for mesh gateways with cluster peering connections.
-
-Refer to [mesh gateway modes](/consul/docs/connect/gateways/mesh-gateway#modes) for more information.
-
-## Sidecar proxy specifications
-
-The Envoy proxies that function as sidecars in your service mesh require configuration in order to properly route traffic to peers. Sidecar proxies are defined in the [service definition](/consul/docs/services/usage/define-services).
-
-- Configure the `proxy.upstreams` parameters to route traffic to the correct service, namespace, and peer. Refer to the [`upstreams`](/consul/docs/connect/proxies/proxy-config-reference#upstream-configuration-reference) documentation for details.
-- The `proxy.upstreams.destination_name` parameter is always required.
-- The `proxy.upstreams.destination_peer` parameter must be configured to enable cross-cluster traffic.
-- The `proxy.upstreams.destination_namespace` configuration is only necessary if the destination service is in a non-default namespace.
-
-## Exported service specifications
-
-The `exported-services` configuration entry is required in order for services to communicate across partitions with cluster peering connections. Basic guidance on using the `exported-services` configuration entry is included in [Establish cluster peering connections](/consul/docs/connect/cluster-peering/usage/establish-cluster-peering#export-services-between-clusters).
-
-Refer to the [`exported-services` configuration entry](/consul/docs/connect/config-entries/exported-services) reference for more information.
-
-## ACL specifications
-
-If ACLs are enabled, you must add tokens to grant the following permissions:
-
-- Grant `service:write` permissions to services that define mesh gateways in their server definition.
-- Grant `service:read` permissions for all services on the partition.
-- Grant `mesh:write` permissions to the mesh gateways that participate in cluster peering connections. This permission allows a leaf certificate to be issued for mesh gateways to terminate TLS sessions for HTTP requests.
\ No newline at end of file
diff --git a/website/content/docs/connect/cluster-peering/usage/create-sameness-groups.mdx b/website/content/docs/connect/cluster-peering/usage/create-sameness-groups.mdx
deleted file mode 100644
index 71537355eb90..000000000000
--- a/website/content/docs/connect/cluster-peering/usage/create-sameness-groups.mdx
+++ /dev/null
@@ -1,306 +0,0 @@
----
-page_title: Create sameness groups
-description: |-
- Learn how to create sameness groups between partitions and cluster peers so that Consul can identify instances of the same service across partitions and datacenters.
----
-
-# Create sameness groups
-
-This topic describes how to create a sameness group, which designates a set of admin partitions as functionally identical in your network. Adding an admin partition to a sameness group enables Consul to recognize services registered to remote partitions with cluster peering connections as instances of the same service when they share a name and namespace.
-
-For information about configuring a failover strategy using sameness groups, refer to [Failover with sameness groups](/consul/docs/connect/manage-traffic/failover/sameness).
-
-## Workflow
-
-Sameness groups are a user-defined set of partitions with identical configurations, including configuration entries for service and proxy defaults. Partitions on separate clusters should have an established cluster peering connection in order to recognize each other.
-
-To create and use sameness groups in your network, complete the following steps:
-
-- **Create sameness group configuration entries for each member of the group**. For each partition that you want to include in the sameness group, you must write and apply a sameness group configuration entry that defines the group’s members from that partition’s perspective. Refer to the [sameness group configuration entry reference](/consul/docs/connect/config-entries/sameness-group) for details on configuration hierarchy, default values, and specifications.
-- **Export services to members of the sameness group**. You must write and apply an exported services configuration entry that makes the partition’s services available to other members of the group. Refer to [exported services configuration entry reference](/consul/docs/connect/config-entries/exported-services) for additional specification information.
-- **Create service intentions to authorize other members of the sameness group**. For each partition that you want to include in the sameness group, you must write and apply service intentions configuration entries to authorize traffic to your services from all members of the group. Refer to the [service intentions configuration entry reference](/consul/docs/connect/config-entries/service-intentions) for additional specification information.
-
-## Requirements
-
-- All datacenters where you want to create sameness groups must run Consul v1.16 or later. Refer to [upgrade instructions](/consul/docs/upgrading/instructions) for more information about how to upgrade your deployment.
-- A [Consul Enterprise license](/consul/docs/enterprise/license/overview) is required.
-
-### Before you begin
-
-Before creating a sameness group, take the following actions to prepare your network:
-
-#### Check namespace and service naming conventions
-
-Sameness groups are defined at the partition level. Consul assumes all partitions in the group have identical configurations, including identical service names and identical namespaces. This behavior occurs even when partitions in the group contain functionally different services that share a common name and namespace. For example, if distinct services named `api` were registered to different members of a sameness group, it could lead to errors because requests may be sent to the incorrect service.
-
-To prevent errors, check the names of the services deployed to your network and the namespaces they are deployed in. Pay particular attention to the default namespace to confirm that services have unique names. If different services share a name, you should either change one of the service’s names or deploy one of the services to a different namespace.
-
-#### Deploy mesh gateways for each partition
-
-Mesh gateways are required for cluster peering connections and recommended to secure cross-partition traffic in a single datacenter. Therefore, we recommend securing your network, and especially your production environment, by deploying mesh gateways to each datacenter. Refer to [mesh gateways specifications](/consul/docs/connect/cluster-peering/tech-specs#mesh-gateway-specifications) for more information about configuring mesh gateways.
-
-#### Establish cluster peering relationships between remote partitions
-
-You must establish connections with cluster peers before you can create a sameness group that includes them. A cluster peering connection exists between two admin partitions in different datacenters, and each connection between two partitions must be established separately with each peer. Refer to [establish cluster peering connections](/consul/docs/connect/cluster-peering/usage/establish-cluster-peering) for step-by-step instructions.
-
-To establish cluster peering connections and define a group as part of the same workflow, follow instructions up to [Export services between clusters](/consul/docs/connect/cluster-peering/usage/establish-cluster-peering#export-services-between-clusters). You can use the same exported services and service intention configuration entries to establish the cluster peering connection and create the sameness group.
-
-## Create a sameness group
-
-To create a sameness group, you must write and apply a set of three configuration entries for each partition that is a member of the group:
-
-- Sameness group configuration entries: Defines the sameness group from each partition’s perspective.
-- Exported services configuration entries: Makes services available to other partitions in the group.
-- Service intentions configuration entries: Authorizes traffic between services across partitions.
-
-### Define the sameness group from each partition’s perspective
-
-To define a sameness group for a partition, create a [sameness group configuration entry](/consul/docs/connect/config-entries/sameness-group) that describes the partitions and cluster peers that are part of the group. Typically, the order follows this pattern:
-
-1. The local partition
-1. Other partitions in the same datacenter
-1. Partitions with established cluster peering relationships
-
-If you want all services to failover to other instances in the sameness group by default, set `DefaultForFailover=true` and list the group members in the order you want to use in a failover scenario. Refer to [failover with sameness groups](/consul/docs/connect/manage-traffic/failover/sameness) for more information.
-
-Be aware that the sameness group configuration entries are different for each partition. The following example demonstrates how to format three different configuration entries for three partitions that are part of the sameness group `product-group` when Partition 1 and Partition 2 are in DC1, and the third partition is Partition 1 in DC2:
-
-
-
-
-
-```hcl
-Kind = "sameness-group"
-Name = "product-group"
-Partition = "partition-1"
-Members = [
- {Partition = "partition-1"},
- {Partition = "partition-2"},
- {Peer = "dc2-partition-1"}
- ]
-```
-
-
-
-
-
-```hcl
-Kind = "sameness-group"
-Name = "product-group"
-Partition = "partition-2"
-Members = [
- {Partition = "partition-2"},
- {Partition = "partition-1"},
- {Peer = "dc2-partition-1"}
- ]
-```
-
-
-
-
-
-```hcl
-Kind = "sameness-group"
-Name = "product-group"
-Partition = "partition-1"
-Members = [
- {Partition = "partition-1"},
- {Peer = "dc1-partition-1"},
- {Peer = "dc1-partition-2"}
- ]
-```
-
-
-
-
-
-After you create the configuration entry, apply it to the Consul server with the following CLI command:
-
-```shell-session
-$ consul config write product-group.hcl
-```
-
-Then, repeat the process to create and apply a configuration entry for every partition that is a member of the sameness group.
-
-### Export services to other partitions in the sameness group
-
-To make services available to other members of the sameness group, you must write and apply an [exported services configuration entry](/consul/docs/connect/config-entries/exported-services) to each partition in the group. This configuration entry exports the local partition's services to the rest of the group members. In each configuration entry, set the sameness group as the `Consumer` for the exported services. You can export multiple services in a single exported services configuration entry.
-
-Because you are configuring the consumer to reference the sameness group instead of listing out each partition and cluster peer, you do not need to edit this configuration again when you add a partition or peer to the group.
-
-The following example demonstrates how to format three different `exported-service` configuration entries to make a service named `api` deployed to the `store` namespace of each partition available to all other group members:
-
-
-
-
-
-```hcl
-Kind = "exported-services"
-Name = "product-sg-export"
-Partition = "partition-1"
-Services = [
- {
- Name = "api"
- Namespace = "store"
- Consumers = [
- {SamenessGroup="product-group"}
- ]
- }
- ]
-```
-
-
-
-
-
-```hcl
-Kind = "exported-services"
-Name = "product-sg-export"
-Partition = "partition-2"
-Services = [
- {
- Name = "api"
- Namespace = "store"
- Consumers = [
- {SamenessGroup="product-group"}
- ]
- }
- ]
-```
-
-
-
-
-
-```hcl
-Kind = "exported-services"
-Name = "product-sg-export"
-Partition = "partition-1"
-Services = [
- {
- Name = "api"
- Namespace = "store"
- Consumers = [
- {SamenessGroup="product-group"}
- ]
- }
- ]
-```
-
-
-
-
-
-For more information about exporting services, including examples of configuration entries that export multiple services at the same time, refer to the [exported services configuration entry reference](/consul/docs/connect/config-entries/exported-services).
-
-After you create each exported services configuration entry, apply it to the Consul server with the following CLI command:
-
-```shell-session
-$ consul config write product-sg-export.hcl
-```
-
-#### Export services for cluster peers and sameness groups as part of the same workflow
-
-Creating a cluster peering connection between two partitions and then adding the partitions to a sameness group requires that you write and apply two separate exported services configuration entries. One configuration entry exports services to the peer, and a second entry exports services to other members of the group.
-
-If your goal for peering clusters is to create a sameness group, you can write and apply a single exported services configuration entry by configuring the `Services[].Consumers` block with the `SamenessGroup` field instead of the `Peer` field.
-
-Be aware that this scenario requires you to write the `sameness-group` configuration entry to Consul before you apply the `exported-services` configuration entry that references the sameness group.
-
-### Create service intentions to authorize traffic between group members
-
-Exporting the service to other members of the sameness group makes the services visible to remote partitions, but you must also create service intentions so that local services are authorized to send and receive traffic from a member of the sameness group.
-
-For each partition that is a member of the group, write and apply a [service intentions configuration entry](/consul/docs/connect/config-entries/service-intentions) that defines intentions for the services that are part of the group. In the `Sources` block of the configuration entry, include the service name, its namespace, the sameness group, and grant `allow` permissions.
-
-Because you are using the sameness group in the `Sources` block rather than listing out each partition and cluster peer, you do not have to make further edits to the service intentions configuration entries when members are added to or removed from the group.
-
-The following example demonstrates how to format three different `service-intentions` configuration entries to make a service named `api` available to all instances of `payments` deployed in all members of the sameness group including the local partition. In this example, `api` is deployed to the `store` namespace in all three partitions.
-
-
-
-
-
-
-```hcl
-Kind = "service-intentions"
-Name = "api-intentions"
-Namespace = "store"
-Partition = "partition-1"
-Sources = [
- {
- Name = "api"
- Action = "allow"
- Namespace = "store"
- SamenessGroup = "product-group"
- }
-]
-```
-
-
-
-
-
-```hcl
-Kind = "service-intentions"
-Name = "api-intentions"
-Namespace = "store"
-Partition = "partition-2"
-Sources = [
- {
- Name = "api"
- Action = "allow"
- Namespace = "store"
- SamenessGroup = "product-group"
- }
-]
-```
-
-
-
-
-
-```hcl
-Kind = "service-intentions"
-Name = "api-intentions"
-Namespace = "store"
-Partition = "partition-3"
-Sources = [
- {
- Name = "api"
- Action = "allow"
- Namespace = "store"
- SamenessGroup = "product-group"
- }
-]
-```
-
-
-
-
-
-Refer to [create and manage intentions](/consul/docs/connect/intentions/create-manage-intentions) for more information about how to create and apply service intentions in Consul.
-
-After you create each service intentions configuration entry, apply it to the Consul server with the following CLI command:
-
-```shell-session
-$ consul config write api-intentions.hcl
-```
-
-#### Create service intentions for cluster peers and sameness groups as part of the same workflow
-
-Creating a cluster peering connection between two remote partitions and then adding the partitions to a sameness group requires that you write and apply two separate service intention configuration entries. One configuration entry authorizes services to the peer, and a second entry authorizes services to other members of the group.
-
-If you are peering clusters with the goal of creating a sameness group, it is possible to combine these workflows by using a single service intentions configuration entry.
-
-Configure the `Sources` block with the `SamenessGroup` field instead of the `Peer` field. Be aware that this scenario requires you to write the `sameness-group` configuration entry to Consul before you apply the `service-intentions` configuration entry that references the sameness group.
-
-## Next steps
-
-When `DefaultForFailover=true` in a sameness group configuration entry, additional upstream configuration is not required.
-
-After creating a sameness group, you can use them with static Consul DNS lookups and dynamic DNS lookups (prepared queries) for service discovery use cases. You can also set up failover between services in a sameness group. Refer to the following topics for more details:
-
-- [Static Consul DNS lookups](/consul/docs/services/discovery/dns-static-lookups)
-- [Dynamic Consul DNS lookups](/consul/docs/services/discovery/dns-dynamic-lookups)
-- [Failover overview](/consul/docs/connect/manage-traffic/failover)
diff --git a/website/content/docs/connect/cluster-peering/usage/establish-cluster-peering.mdx b/website/content/docs/connect/cluster-peering/usage/establish-cluster-peering.mdx
deleted file mode 100644
index b2ed30f46058..000000000000
--- a/website/content/docs/connect/cluster-peering/usage/establish-cluster-peering.mdx
+++ /dev/null
@@ -1,269 +0,0 @@
----
-layout: docs
-page_title: Establish Cluster Peering Connections
-description: >-
- Generate a peering token to establish communication, export services, and authorize requests for cluster peering connections. Learn how to establish peering connections with Consul's HTTP API, CLI or UI.
----
-
-# Establish cluster peering connections
-
-This page details the process for establishing a cluster peering connection between services deployed to different datacenters. You can interact with Consul's cluster peering features using the CLI, the HTTP API, or the UI. The overall process for establishing a cluster peering connection consists of the following steps:
-
-1. Create a peering token in one cluster.
-1. Use the peering token to establish peering with a second cluster.
-1. Export services between clusters.
-1. Create intentions to authorize services for peers.
-
-Cluster peering between services cannot be established until all four steps are complete. If you want to establish cluster peering connections and create sameness groups at the same time, refer to the guidance in [create sameness groups](/consul/docs/connect/cluster-peering/usage/create-sameness-groups).
-
-For Kubernetes guidance, refer to [Establish cluster peering connections on Kubernetes](/consul/docs/k8s/connect/cluster-peering/usage/establish-peering).
-
-## Requirements
-
-You must meet the following requirements to use cluster peering:
-
-- Consul v1.14.1 or higher
-- Services hosted in admin partitions on separate datacenters
-
-If you need to make services available to an admin partition in the same datacenter, do not use cluster peering. Instead, use the [`exported-services` configuration entry](/consul/docs/connect/config-entries/exported-services) to make service upstreams available to other admin partitions in a single datacenter.
-
-### Mesh gateway requirements
-
-Consul's default configuration supports cluster peering connections directly between clusters. In production environments, we recommend using mesh gateways to securely route service mesh traffic between partitions with cluster peering connections.
-
-To enable cluster peering through mesh gateways and configure mesh gateways to support cluster peering, refer to [mesh gateway specifications](/consul/docs/connect/cluster-peering/tech-specs#mesh-gateway-specifications).
-
-## Create a peering token
-
-To begin the cluster peering process, generate a peering token in one of your clusters. The other cluster uses this token to establish the peering connection.
-
-Every time you generate a peering token, a single-use secret for establishing the peering connection is embedded in the token. Because regenerating a peering token invalidates the previously generated secret, you must use the most recently created token to establish peering connections.
-
-
-
-
-1. In `cluster-01`, use the [`consul peering generate-token` command](/consul/commands/peering/generate-token) to issue a request for a peering token.
-
- ```shell-session
- $ consul peering generate-token -name cluster-02
- ```
-
- The CLI outputs the peering token, which is a base64-encoded string containing the token details.
-
-1. Save this value to a file or clipboard to use in the next step on `cluster-02`.
-
-
-
-
-1. In `cluster-01`, use the [`/peering/token` endpoint](/consul/api-docs/peering#generate-a-peering-token) to issue a request for a peering token.
-
- ```shell-session
- $ curl --request POST --data '{"Peer":"cluster-02"}' --url http://localhost:8500/v1/peering/token
- ```
-
-   The API response returns the peering token, which is a base64-encoded string containing the token details.
-
-1. Create a JSON file that contains the first cluster's name and the peering token.
-
-
-
- ```json
- {
- "Peer": "cluster-01",
- "PeeringToken": "eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJhZG1pbiIsImF1ZCI6IlNvbHIifQ.5T7L_L1MPfQ_5FjKGa1fTPqrzwK4bNSM812nW6oyjb8"
- }
- ```
-
-
-
-
-
-
-To begin the cluster peering process, generate a peering token in one of your clusters. The other cluster uses this token to establish the peering connection.
-
-Every time you generate a peering token, a single-use secret for establishing the peering connection is embedded in the token. Because regenerating a peering token invalidates the previously generated secret, you must use the most recently created token to establish peering connections.
-
-1. In the Consul UI for the datacenter associated with `cluster-01`, click **Peers**.
-1. Click **Add peer connection**.
-1. In the **Generate token** tab, enter `cluster-02` in the **Name of peer** field.
-1. Click the **Generate token** button.
-1. Copy the token before you proceed. You cannot view it again after leaving this screen. If you lose your token, you must generate a new one.
-
-
-
-
-## Establish a connection between clusters
-
-Next, use the peering token to establish a secure connection between the clusters.
-
-
-
-
-1. In one of the client agents deployed to "cluster-02," issue the [`consul peering establish` command](/consul/commands/peering/establish) and specify the token generated in the previous step.
-
- ```shell-session
- $ consul peering establish -name cluster-01 -peering-token token-from-generate
- "Successfully established peering connection with cluster-01"
- ```
-
-When you connect server agents through cluster peering, they peer their default partitions. To establish peering connections for other partitions through server agents, you must add the `-partition` flag to the `establish` command and specify the partitions you want to peer. For additional configuration information, refer to [`consul peering establish` command](/consul/commands/peering/establish).
-
-You can run the `peering establish` command once per peering token. Peering tokens cannot be reused after being used to establish a connection. If you need to re-establish a connection, you must generate a new peering token.
-
-
-
-
-1. In one of the client agents in "cluster-02," use `peering_token.json` and the [`/peering/establish` endpoint](/consul/api-docs/peering#establish-a-peering-connection) to establish the peering connection. This endpoint does not generate an output unless there is an error.
-
- ```shell-session
- $ curl --request POST --data @peering_token.json http://127.0.0.1:8500/v1/peering/establish
- ```
-
-When you connect server agents through cluster peering, their default behavior is to peer to the `default` partition. To establish peering connections for other partitions through server agents, you must add the `Partition` field to `peering_token.json` and specify the partitions you want to peer. For additional configuration information, refer to [Cluster Peering - HTTP API](/consul/api-docs/peering).
-
-You can dial the `peering/establish` endpoint once per peering token. Peering tokens cannot be reused after being used to establish a connection. If you need to re-establish a connection, you must generate a new peering token.
-
-
-
-
-
-1. In the Consul UI for the datacenter associated with `cluster 02`, click **Peers** and then **Add peer connection**.
-1. Click **Establish peering**.
-1. In the **Name of peer** field, enter `cluster-01`. Then paste the peering token in the **Token** field.
-1. Click **Add peer**.
-
-
-
-
-## Export services between clusters
-
-After you establish a connection between the clusters, you need to create an `exported-services` configuration entry that defines the services that are available for other clusters. Consul uses this configuration entry to advertise service information and support service mesh connections across clusters.
-
-An `exported-services` configuration entry makes services available to another admin partition. While it can target admin partitions either locally or remotely, cluster peers always export services to remote partitions. Refer to [exported service consumers](/consul/docs/connect/config-entries/exported-services#consumers-1) for more information.
-
-You must use the Consul CLI to complete this step. The HTTP API and the Consul UI do not support `exported-services` configuration entries.
-
-
-
-
-1. Create a configuration entry and specify the `Kind` as `"exported-services"`.
-
-
-
- ```hcl
- Kind = "exported-services"
- Name = "default"
- Services = [
- {
- ## The name and namespace of the service to export.
- Name = "service-name"
- Namespace = "default"
-
- ## The list of peer clusters to export the service to.
- Consumers = [
- {
- ## The peer name to reference in config is the one set
- ## during the peering process.
- Peer = "cluster-02"
- }
- ]
- }
- ]
- ```
-
-
-
-1. Add the configuration entry to your cluster.
-
- ```shell-session
- $ consul config write peering-config.hcl
- ```
-
-Before you proceed, wait for the clusters to sync and make services available to their peers. To check the peered cluster status, [read the cluster peering connection](/consul/docs/connect/cluster-peering/usage/manage-connections#read-a-peering-connection).
-
-
-
-
-## Authorize services for peers
-
-Before you can call services from peered clusters, you must set service intentions that authorize those clusters to use specific services. Consul prevents services from being exported to unauthorized clusters.
-
-You must use the HTTP API or the Consul CLI to complete this step. The Consul UI supports intentions for local clusters only.
-
-
-
-
-1. Create a configuration entry and specify the `Kind` as `"service-intentions"`. Declare the service on "cluster-02" that can access the service in "cluster-01." In the following example, the service intentions configuration entry authorizes the `backend-service` to communicate with the `frontend-service` that is hosted on remote peer `cluster-02`:
-
-
-
- ```hcl
- Kind = "service-intentions"
- Name = "backend-service"
-
- Sources = [
- {
- Name = "frontend-service"
- Peer = "cluster-02"
- Action = "allow"
- }
- ]
- ```
-
-
-
- If the peer's name is not specified in `Peer`, then Consul assumes that the service is in the local cluster.
-
-1. Add the configuration entry to your cluster.
-
- ```shell-session
- $ consul config write peering-intentions.hcl
- ```
-
-
-
-
-1. Create a configuration entry and specify the `Kind` as `"service-intentions"`. Declare the service on "cluster-02" that can access the service in "cluster-01." In the following example, the service intentions configuration entry authorizes the `backend-service` to communicate with the `frontend-service` that is hosted on remote peer `cluster-02`:
-
-
-
- ```hcl
- Kind = "service-intentions"
- Name = "backend-service"
-
- Sources = [
- {
- Name = "frontend-service"
- Peer = "cluster-02"
- Action = "allow"
- }
- ]
- ```
-
-
-
- If the peer's name is not specified in `Peer`, then Consul assumes that the service is in the local cluster.
-
-1. Add the configuration entry to your cluster.
-
- ```shell-session
- $ curl --request PUT --data @peering-intentions.hcl http://127.0.0.1:8500/v1/config
- ```
-
-
-
-
-### Authorize service reads with ACLs
-
-If ACLs are enabled on a Consul cluster, sidecar proxies that access exported services as an upstream must have an ACL token that grants read access.
-
-Read access to all imported services is granted using either of the following rules associated with an ACL token:
-
-- `service:write` permissions for any service in the sidecar's partition.
-- `service:read` and `node:read` for all services and nodes, respectively, in the sidecar's namespace and partition.
-
-For Consul Enterprise, the permissions apply to all imported services in the service's partition. These permissions are satisfied when using a [service identity](/consul/docs/security/acl/acl-roles#service-identities).
-
-Refer to [Reading services](/consul/docs/connect/config-entries/exported-services#reading-services) in the `exported-services` configuration entry documentation for example rules.
-
-For additional information about how to configure and use ACLs, refer to [ACLs system overview](/consul/docs/security/acl).
diff --git a/website/content/docs/connect/cluster-peering/usage/manage-connections.mdx b/website/content/docs/connect/cluster-peering/usage/manage-connections.mdx
deleted file mode 100644
index a4e92373328a..000000000000
--- a/website/content/docs/connect/cluster-peering/usage/manage-connections.mdx
+++ /dev/null
@@ -1,137 +0,0 @@
----
-layout: docs
-page_title: Manage Cluster Peering Connections
-description: >-
- Learn how to list, read, and delete cluster peering connections using Consul. You can use the HTTP API, the CLI, or the Consul UI to manage cluster peering connections.
----
-
-# Manage cluster peering connections
-
-This usage topic describes how to manage cluster peering connections using the CLI, the HTTP API, and the UI.
-
-After you establish a cluster peering connection, you can get a list of all active peering connections, read a specific peering connection's information, and delete peering connections.
-
-For Kubernetes-specific guidance for managing cluster peering connections, refer to [Manage cluster peering connections on Kubernetes](/consul/docs/k8s/connect/cluster-peering/usage/manage-peering).
-
-## List all peering connections
-
-You can list all active peering connections in a cluster.
-
-
-
-
- ```shell-session
- $ consul peering list
- Name State Imported Svcs Exported Svcs Meta
- cluster-02 ACTIVE 0 2 env=production
- cluster-03 PENDING 0 0
- ```
-
-For more information, including optional flags and parameters, refer to the [`consul peering list` CLI command reference](/consul/commands/peering/list).
-
-
-
-
-The following example shows how to format an API request to list peering connections:
-
- ```shell-session
- $ curl --header "X-Consul-Token: 0137db51-5895-4c25-b6cd-d9ed992f4a52" http://127.0.0.1:8500/v1/peerings
- ```
-
-For more information, including optional parameters and sample responses, refer to the [`/peering` endpoint reference](/consul/api-docs/peering#list-all-peerings).
-
-
-
-
-In the Consul UI, click **Peers**.
-
-The UI lists peering connections you created for clusters in a datacenter. The name that appears in the list is the name of the cluster in a different datacenter with an established peering connection.
-
-
-
-
-## Read a peering connection
-
-You can get information about individual peering connections between clusters.
-
-
-
-
-
-The following example outputs information about a peering connection locally referred to as "cluster-02":
-
- ```shell-session
- $ consul peering read -name cluster-02
- Name: cluster-02
- ID: 3b001063-8079-b1a6-764c-738af5a39a97
- State: ACTIVE
- Meta:
- env=production
-
- Peer ID: e83a315c-027e-bcb1-7c0c-a46650904a05
- Peer Server Name: server.dc1.consul
- Peer CA Pems: 0
- Peer Server Addresses:
- 10.0.0.1:8300
-
- Imported Services: 0
- Exported Services: 2
-
- Create Index: 89
- Modify Index: 89
- ```
-
-For more information, including optional flags and parameters, refer to the [`consul peering read` CLI command reference](/consul/commands/peering/read).
-
-
-
-
- ```shell-session
- $ curl --header "X-Consul-Token: b23b3cad-5ea1-4413-919e-c76884b9ad60" http://127.0.0.1:8500/v1/peering/cluster-02
- ```
-
-For more information, including optional parameters and sample responses, refer to the [`/peering` endpoint reference](/consul/api-docs/peering#read-a-peering-connection).
-
-
-
-
-1. In the Consul UI, click **Peers**.
-
-1. Click the name of a peered cluster to view additional details about the peering connection.
-
-
-
-
-## Delete peering connections
-
-You can disconnect the peered clusters by deleting their connection. Deleting a peering connection stops data replication to the peer and deletes imported data, including services and CA certificates.
-
-
-
-
- The following example deletes a peering connection to a cluster locally referred to as "cluster-02":
-
- ```shell-session
- $ consul peering delete -name cluster-02
- Successfully submitted peering connection, cluster-02, for deletion
- ```
-
-For more information, including optional flags and parameters, refer to the [`consul peering delete` CLI command reference](/consul/commands/peering/delete).
-
-
-
-
- ```shell-session
- $ curl --request DELETE --header "X-Consul-Token: b23b3cad-5ea1-4413-919e-c76884b9ad60" http://127.0.0.1:8500/v1/peering/cluster-02
- ```
-
-This endpoint does not return a response. For more information, including optional parameters, refer to the [`/peering` endpoint reference](/consul/api-docs/peering#delete-a-peering-connection).
-
-
-
-1. In the Consul UI, click **Peers**. The UI lists peering connections you created for clusters in that datacenter.
-1. Next to the name of the peer, click **More** (three horizontal dots) and then **Delete**.
-1. Click **Delete** to confirm and remove the peering connection.
-
-
-
\ No newline at end of file
diff --git a/website/content/docs/connect/cluster-peering/usage/peering-traffic-management.mdx b/website/content/docs/connect/cluster-peering/usage/peering-traffic-management.mdx
deleted file mode 100644
index 63942e5bdeef..000000000000
--- a/website/content/docs/connect/cluster-peering/usage/peering-traffic-management.mdx
+++ /dev/null
@@ -1,168 +0,0 @@
----
-layout: docs
-page_title: Cluster Peering L7 Traffic Management
-description: >-
- Combine service resolver configurations with splitter and router configurations to manage L7 traffic in Consul deployments with cluster peering connections. Learn how to define dynamic traffic rules to target peers for redirects and failover.
----
-
-# Manage L7 traffic with cluster peering
-
-This usage topic describes how to configure and apply the [`service-resolver` configuration entry](/consul/docs/connect/config-entries/service-resolver) to set up redirects and failovers between services that have an existing cluster peering connection.
-
-For Kubernetes-specific guidance for managing L7 traffic with cluster peering, refer to [Manage L7 traffic with cluster peering on Kubernetes](/consul/docs/k8s/connect/cluster-peering/usage/l7-traffic).
-
-## Service resolvers for redirects and failover
-
-When you use cluster peering to connect datacenters through their admin partitions, you can use [dynamic traffic management](/consul/docs/connect/manage-traffic) to configure your service mesh so that services automatically forward traffic to services hosted on peer clusters.
-
-However, the `service-splitter` and `service-router` configuration entry kinds do not natively support directly targeting a service instance hosted on a peer. Before you can split or route traffic to a service on a peer, you must define the service hosted on the peer as an upstream service by configuring a failover in the `service-resolver` configuration entry. Then, you can set up a redirect in a second service resolver to interact with the peer service by name.
-
-For more information about formatting, updating, and managing configuration entries in Consul, refer to [How to use configuration entries](/consul/docs/agent/config-entries).
-
-## Configure dynamic traffic between peers
-
-To configure L7 traffic management behavior in deployments with cluster peering connections, complete the following steps in order:
-
-1. Define the peer cluster as a failover target in the service resolver configuration.
-
- The following examples update the [`service-resolver` configuration entry](/consul/docs/connect/config-entries/service-resolver) in `cluster-01` so that Consul redirects traffic intended for the `frontend` service to a backup instance in peer `cluster-02` when it detects multiple connection failures.
-
-
-
- ```hcl
- Kind = "service-resolver"
- Name = "frontend"
- ConnectTimeout = "15s"
- Failover = {
- "*" = {
- Targets = [
- {Peer = "cluster-02"}
- ]
- }
- }
- ```
-
- ```json
- {
- "ConnectTimeout": "15s",
- "Kind": "service-resolver",
- "Name": "frontend",
- "Failover": {
- "*": {
- "Targets": [
- {
- "Peer": "cluster-02"
- }
- ]
- }
- },
- "CreateIndex": 250,
- "ModifyIndex": 250
- }
- ```
-
- ```yaml
- apiVersion: consul.hashicorp.com/v1alpha1
- kind: ServiceResolver
- metadata:
- name: frontend
- spec:
- connectTimeout: 15s
- failover:
- '*':
- targets:
- - peer: 'cluster-02'
- service: 'frontend'
- namespace: 'default'
- ```
-
-
-
-1. Define the desired behavior in `service-splitter` or `service-router` configuration entries.
-
- The following example splits traffic evenly between `frontend` services hosted on peers by defining the desired behavior locally:
-
-
-
- ```hcl
- Kind = "service-splitter"
- Name = "frontend"
- Splits = [
- {
- Weight = 50
- ## defaults to service with same name as configuration entry ("frontend")
- },
- {
- Weight = 50
- Service = "frontend-peer"
- },
- ]
- ```
-
- ```json
- {
- "Kind": "service-splitter",
- "Name": "frontend",
- "Splits": [
- {
- "Weight": 50
- },
- {
- "Weight": 50,
- "Service": "frontend-peer"
- }
- ]
- }
- ```
-
- ```yaml
- apiVersion: consul.hashicorp.com/v1alpha1
- kind: ServiceSplitter
- metadata:
- name: frontend
- spec:
- splits:
- - weight: 50
- ## defaults to service with same name as configuration entry ("frontend")
- - weight: 50
- service: frontend-peer
- ```
-
-
-
-1. Create a local `service-resolver` configuration entry named `frontend-peer` and define a redirect targeting the peer and its service:
-
-
-
- ```hcl
- Kind = "service-resolver"
- Name = "frontend-peer"
- Redirect {
-   Service = "frontend"
- Peer = "cluster-02"
- }
- ```
-
- ```json
- {
- "Kind": "service-resolver",
- "Name": "frontend-peer",
- "Redirect": {
- "Service": "frontend",
- "Peer": "cluster-02"
- }
- }
- ```
-
- ```yaml
- apiVersion: consul.hashicorp.com/v1alpha1
- kind: ServiceResolver
- metadata:
- name: frontend-peer
- spec:
- redirect:
- peer: 'cluster-02'
- service: 'frontend'
- ```
-
-
\ No newline at end of file
diff --git a/website/content/docs/connect/config-entries/api-gateway.mdx b/website/content/docs/connect/config-entries/api-gateway.mdx
deleted file mode 100644
index dc5d6f63e2a5..000000000000
--- a/website/content/docs/connect/config-entries/api-gateway.mdx
+++ /dev/null
@@ -1,562 +0,0 @@
----
-layout: docs
-page_title: API Gateway configuration reference
-description: Learn how to configure a Consul API gateway on VMs.
----
-
-# API gateway configuration reference
-
-This topic provides reference information for the API gateway configuration entry that you can deploy to networks in virtual machine (VM) environments. For reference information about configuring Consul API gateways on Kubernetes, refer to [Gateway Resource Configuration](/consul/docs/connect/gateways/api-gateway/configuration/gateway).
-
-## Introduction
-
-A gateway is a type of network infrastructure that determines how service traffic should be handled. Gateways contain one or more listeners that bind to a set of hosts and ports. An HTTP Route or TCP Route can then attach to a gateway listener to direct traffic from the gateway to a service.
-
-## Configuration model
-
-The following list outlines field hierarchy, language-specific data types, and requirements in an `api-gateway` configuration entry. Click on a property name to view additional details, including default values.
-
-- [`Kind`](#kind): string | must be `"api-gateway"`
-- [`Name`](#name): string | no default
-- [`Namespace`](#namespace): string | no default
-- [`Partition`](#partition): string | no default
-- [`Meta`](#meta): map | no default
-- [`Listeners`](#listeners): list of objects | no default
- - [`Name`](#listeners-name): string | no default
- - [`Port`](#listeners-port): number | no default
- - [`Hostname`](#listeners-hostname): string | `"*"`
- - [`Protocol`](#listeners-protocol): string | `"tcp"`
- - [`TLS`](#listeners-tls): map | none
- - [`MinVersion`](#listeners-tls-minversion): string | no default
- - [`MaxVersion`](#listeners-tls-maxversion): string | no default
- - [`CipherSuites`](#listeners-tls-ciphersuites): list of strings | Envoy default cipher suites
- - [`Certificates`](#listeners-tls-certificates): list of objects | no default
- - [`Kind`](#listeners-tls-certificates-kind): string | no default
- - [`Name`](#listeners-tls-certificates-name): string | no default
- - [`Namespace`](#listeners-tls-certificates-namespace): string | no default
- - [`Partition`](#listeners-tls-certificates-partition): string | no default
- - [`default`](#listeners-default): map
- - [`JWT`](#listeners-default-jwt): map
- - [`Providers`](#listeners-default-jwt-providers): list
- - [`Name`](#listeners-default-jwt-providers): string
- - [`VerifyClaims`](#listeners-default-jwt-providers): map
- - [`Path`](#listeners-default-jwt-providers): list
- - [`Value`](#listeners-default-jwt-providers): string
- - [`override`](#listeners-override): map
- - [`JWT`](#listeners-override-jwt): map
- - [`Providers`](#listeners-override-jwt-providers): list
- - [`Name`](#listeners-override-jwt-providers): string
- - [`VerifyClaims`](#listeners-override-jwt-providers): map
- - [`Path`](#listeners-override-jwt-providers): list
- - [`Value`](#listeners-override-jwt-providers): string
-
-
-
-## Complete configuration
-
-When every field is defined, an `api-gateway` configuration entry has the following form:
-
-
-
-```hcl
-Kind = "api-gateway"
-Name = ""
-Namespace = ""
-Partition = ""
-
-Meta = {
- = ""
-}
-
-Listeners = [
- {
- Port =
- Name = ""
- Protocol = ""
- TLS = {
- MaxVersion = ""
- MinVersion = ""
- CipherSuites = [
- ""
- ]
- Certificates = [
- {
- Kind = "file-system-certificate"
- Name = ""
- Namespace = ""
- Partition = ""
- }
- ]
- }
- default = {
- JWT = {
- Providers = [
- Name = ""
- VerifyClaims = {
- Path = [""]
- Value = ""
- }
- ]
- }
- }
- override = {
- JWT = {
- Providers = [
- Name = ""
- VerifyClaims = {
- Path = [""]
- Value = ""
- }
- ]
- }
- }
- }
-]
-```
-
-```json
-{
- "Kind": "api-gateway",
- "Name": "",
- "Namespace": "",
- "Partition": "",
- "Meta": {
- "": ""
- },
- "Listeners": [
- {
- "Name": "",
- "Port": ,
- "Protocol": "",
- "TLS": {
- "MaxVersion": "",
- "MinVersion": "",
- "CipherSuites": [
- ""
- ],
- "Certificates": [
- {
- "Kind": "file-system-certificate",
- "Name": "",
- "Namespace": "",
- "Partition": ""
- }
- ]
- }
- },
- {
- "default": {
- "JWT": {
- "Providers": [
- {
- "Name": "",
- "VerifyClaims": {
- "Path": [""],
- "Value": ""
- }
- }
- ]
- }
- }
- },
- {
- "override": {
- "JWT": {
- "Providers": [
- {
- "Name": "",
- "VerifyClaims": {
- "Path": [""],
- "Value": ""
- }
- }
- ]
- }
- }
- }
- ]
-}
-```
-
-
-
-## Specification
-
-This section provides details about the fields you can configure in the
-`api-gateway` configuration entry.
-
-### `Kind`
-
-Specifies the type of configuration entry to implement. This must be
-`api-gateway`.
-
-#### Values
-
-- Default: none
-- This field is required.
-- Data type: string value that must be set to `"api-gateway"`.
-
-### `Name`
-
-Specifies a name for the configuration entry. The name is metadata that you can
-use to reference the configuration entry when performing Consul operations,
-such as applying a configuration entry to a specific cluster.
-
-#### Values
-
-- Default: none
-- This field is required.
-- Data type: string
-
-### `Namespace`
-
-Specifies the Enterprise [namespace](/consul/docs/enterprise/namespaces) to apply to the configuration entry.
-
-#### Values
-
-- Default: `"default"` in Enterprise
-- Data type: string
-
-### `Partition`
-
-Specifies the Enterprise [admin partition](/consul/docs/enterprise/admin-partitions) to apply to the configuration entry.
-
-#### Values
-
-- Default: `"default"` in Enterprise
-- Data type: string
-
-### `Meta`
-
-Specifies an arbitrary set of key-value pairs to associate with the gateway.
-
-#### Values
-
-- Default: none
-- Data type: map containing one or more keys and string values.
-
-### `Listeners[]`
-
-Specifies a list of listeners that gateway should set up. Listeners are
-uniquely identified by their port number.
-
-#### Values
-
-- Default: none
-- This field is required.
-- Data type: List of maps. Each member of the list contains the following fields:
- - [`Name`](#listeners-name)
- - [`Port`](#listeners-port)
- - [`Hostname`](#listeners-hostname)
- - [`Protocol`](#listeners-protocol)
- - [`TLS`](#listeners-tls)
-
-### `Listeners[].Name`
-
-Specifies the unique name for the listener. This field accepts letters, numbers, and hyphens.
-
-#### Values
-
-- Default: none
-- This field is required.
-- Data type: string
-
-### `Listeners[].Port`
-
-Specifies the port number that the listener receives traffic on.
-
-#### Values
-
-- Default: `0`
-- This field is required.
-- Data type: integer
-
-### `Listeners[].Hostname`
-
-Specifies the hostname that the listener receives traffic on.
-
-#### Values
-
-- Default: `"*"`
-- This field is optional.
-- Data type: string
-
-### `Listeners[].Protocol`
-
-Specifies the protocol associated with the listener.
-
-#### Values
-
-- Default: none
-- This field is required.
-- The data type is one of the following string values: `"tcp"` or `"http"`.
-
-### `Listeners[].TLS`
-
-Specifies the TLS configurations for the listener.
-
-#### Values
-
-- Default: none
-- Map that contains the following fields:
- - [`MaxVersion`](#listeners-tls-maxversion)
- - [`MinVersion`](#listeners-tls-minversion)
- - [`CipherSuites`](#listeners-tls-ciphersuites)
- - [`Certificates`](#listeners-tls-certificates)
-
-### `Listeners[].TLS.MaxVersion`
-
-Specifies the maximum TLS version supported for the listener.
-
-#### Values
-
-- Default depends on the version of Envoy:
- - Envoy 1.22.0 and later default to `TLSv1_2`
- - Older versions of Envoy default to `TLSv1_0`
-- Data type is one of the following string values:
- - `TLS_AUTO`
- - `TLSv1_0`
- - `TLSv1_1`
- - `TLSv1_2`
- - `TLSv1_3`
-
-### `Listeners[].TLS.MinVersion`
-
-Specifies the minimum TLS version supported for the listener.
-
-#### Values
-
-- Default: none
-- Data type is one of the following string values:
- - `TLS_AUTO`
- - `TLSv1_0`
- - `TLSv1_1`
- - `TLSv1_2`
- - `TLSv1_3`
-
-### `Listeners[].TLS.CipherSuites[]`
-
-Specifies a list of cipher suites that the listener supports when negotiating connections using TLS 1.2 or older.
-
-#### Values
-
-- Defaults to the ciphers supported by the version of Envoy in use. Refer to the
- [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#envoy-v3-api-field-extensions-transport-sockets-tls-v3-tlsparameters-cipher-suites)
- for details.
-- Data type: List of string values. Refer to the
- [Consul repository](https://github.com/hashicorp/consul/blob/v1.11.2/types/tls.go#L154-L169)
- for a list of supported ciphers.
-
-### `Listeners[].TLS.Certificates[]`
-
-The list of references to [file system](/consul/docs/connect/config-entries/file-system-certificate) or [inline certificates](/consul/docs/connect/config-entries/inline-certificate) that the listener uses for TLS termination. You should create the configuration entry for the certificate separately and then reference the configuration entry in the `Name` field.
-
-#### Values
-
-- Default: None
-- Data type: List of maps. Each member of the list has the following fields:
- - [`Kind`](#listeners-tls-certificates-kind)
- - [`Name`](#listeners-tls-certificates-name)
- - [`Namespace`](#listeners-tls-certificates-namespace)
- - [`Partition`](#listeners-tls-certificates-partition)
-
-### `Listeners[].TLS.Certificates[].Kind`
-
-The list of references to certificates that the listener uses for TLS termination.
-
-#### Values
-
-- Default: None
-- This field is required.
-- The data type is one of the following string values: `"file-system-certificate"` or `"inline-certificate"`.
-
-### `Listeners[].TLS.Certificates[].Name`
-
-Specifies the name of the [file system certificate](/consul/docs/connect/config-entries/file-system-certificate) or [inline certificate](/consul/docs/connect/config-entries/inline-certificate) that the listener uses for TLS termination.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: string
-
-### `Listeners[].TLS.Certificates[].Namespace`
-
-Specifies the Enterprise [namespace](/consul/docs/enterprise/namespaces) where the certificate can be found.
-
-#### Values
-
-- Default: `"default"` in Enterprise
-- Data type: string
-
-### `Listeners[].TLS.Certificates[].Partition`
-
-Specifies the Enterprise [admin partition](/consul/docs/enterprise/admin-partitions) where the certificate can be found.
-
-#### Values
-
-- Default: `"default"` in Enterprise
-- Data type: string
-
-### `Listeners[].default`
-
-Specifies a block of default configurations to apply to the gateway listener. All routes attached to the listener inherit the default configurations. You can specify override configurations that have precedence over default configurations in the [`override` block](#listeners-override) as well as in the `JWT` block in the [HTTP route configuration entry](/consul/docs/connect/config-entries/http-route).
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-### `Listeners[].default{}.JWT`
-
-Specifies a block of default JWT verification configurations to apply to the gateway listener. Specify configurations that have precedence over the defaults in either the [`override.JWT` block](#listeners-override) or in the [`JWT` block](/consul/docs/connect/config-entries/http-route#rules-filters-jwt) in the HTTP route configuration. Refer to [Use JWTs to verify requests to API gateways](/consul/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-vms) for order of precedence and other details about using JWT verification in API gateways.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-### `Listeners[].default{}.JWT{}.Providers`
-
-Specifies a list of default JWT provider configurations to apply to the gateway listener. A provider configuration contains the name of the provider and claims. Specify configurations that have precedence over the defaults in either the [`override.JWT.Providers` block](#listeners-override-providers) or in the [`JWT` block](/consul/docs/connect/config-entries/http-route#rules-filters-jwt-providers) of the HTTP route configuration. Refer to [Use JWTs to verify requests to API gateways](/consul/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-vms) for order of precedence and other details about using JWT verification in API gateways.
-
-#### Values
-
-- Default: None
-- Data type: List of maps
-
-The following table describes the parameters you can specify in a member of the `Providers` list:
-
-| Parameter | Description | Data type | Default |
-| --- | --- | --- | --- |
-| `Name` | Specifies the name of the provider. | String | None |
-| `VerifyClaims` | Specifies a list of paths and a value that define the claim that Consul verifies when it receives a request. The `VerifyClaims` map specifies the following settings: <br/> `Path`: Specifies a list of one or more registered or custom claims. <br/> `Value`: Specifies the expected value of the claim. | Map | None |
-
-Refer to [Configure JWT verification settings](#configure-jwt-verification-settings) for an example configuration.
-
-### `Listeners[].override`
-
-Specifies a block of configurations to apply to the gateway listener. The override settings have precedence over the configurations in the [`Listeners[].default` block](#listeners-default).
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-### `Listeners[].override{}.JWT`
-
-Specifies a block of JWT verification configurations to apply to the gateway listener. The override settings have precedence over the [`Listeners[].default` configurations](#listeners-default) as well as any route-specific JWT configurations.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-### `Listeners[].override{}.JWT{}.Providers`
-
-Specifies a list of JWT provider configurations to apply to the gateway listener. A provider configuration contains the name of the provider and claims. The override settings have precedence over `Listeners[].defaults{}.JWT{}.Providers` as well as any listener-specific configuration.
-
-#### Values
-
-- Default: None
-- Data type: List of maps
-
-The following table describes the parameters you can specify in a member of the `Providers` list:
-
-| Parameter | Description | Data type | Default |
-| --- | --- | --- | --- |
-| `Name` | Specifies the name of the provider. | String | None |
-| `VerifyClaims` | Specifies a list of paths and a value that define the claim that Consul verifies when it receives a request. The `VerifyClaims` map specifies the following settings: <br/> `Path`: Specifies a list of one or more registered or custom claims. <br/> `Value`: Specifies the expected value of the claim. | Map | None |
-
-Refer to [Configure JWT verification settings](#configure-jwt-verification-settings) for an example configuration.
-
-## Examples
-
-The following examples demonstrate common API gateway configuration patterns for specific use cases.
-
-### Configure JWT verification settings
-
-The following example configures `listener-one` to verify that requests include a token with Okta user permissions by default. The listener also verifies that the token has an audience of `api.apps.organization.com`.
-
-
-
-
-```hcl
-Kind = "api-gateway"
-Name = "api-gateway"
-Listeners = [
- {
- name = "listener-one"
- port = 9001
- protocol = "http"
- # override and default are backed by the same type of data structure, see the following section for more on how they interact
- override = {
- JWT = {
- Providers = [
- {
- Name = "okta",
- VerifyClaims = {
- Path = ["aud"],
- Value = "api.apps.organization.com",
- }
- },
- ]
- }
- }
- default = {
- JWT = {
- Providers = [
- {
- Name = "okta",
- VerifyClaims = {
- Path = ["perms", "role"],
- Value = "user",
- }
- }
- ]
- }
- }
- }
-]
-```
-
-
-
-```json
-{
- "Kind": "api-gateway",
- "Name": "api-gateway",
- "Listeners": [
- {
- "name": "listener-one",
- "port": 9001,
- "protocol": "http",
- "override": {
- "JWT": {
- "Providers": [{
- "Name": "okta",
- "VerifyClaims": {
- "Path": ["aud"],
- "Value": "api.apps.organization.com"
- }
- }]
- }
- },
- "default": {
- "JWT": {
- "Providers": [{
- "Name": "okta",
- "VerifyClaims": {
- "Path": ["perms", "role"],
- "Value": "user"
- }
- }]
- }
- }
- }
- ]
-}
-```
-
-
-
diff --git a/website/content/docs/connect/config-entries/index.mdx b/website/content/docs/connect/config-entries/index.mdx
deleted file mode 100644
index 309ea26b177a..000000000000
--- a/website/content/docs/connect/config-entries/index.mdx
+++ /dev/null
@@ -1,60 +0,0 @@
----
-layout: docs
-page_title: Configuration Entry Overview
-description: >-
- Configuration entries define service mesh behaviors in order to secure and manage traffic. Learn about Consul’s different config entry kinds and get links to configuration reference pages.
----
-
-# Configuration Entry Overview
-
-Configuration entries can be used to configure the behavior of Consul service mesh.
-
-The following configuration entries are supported:
-
-- [API Gateway](/consul/docs/connect/config-entries/api-gateway) - defines the configuration for an API gateway
-
-- [Ingress Gateway](/consul/docs/connect/config-entries/ingress-gateway) - defines the
- configuration for an ingress gateway
-
-- [Mesh](/consul/docs/connect/config-entries/mesh) - controls
- mesh-wide configuration that applies across namespaces and federated datacenters.
-
-- [Exported Services](/consul/docs/connect/config-entries/exported-services) - enables
- Consul to export service instances to other peers or to other admin partitions local or remote to the datacenter.
-
-- [Proxy Defaults](/consul/docs/connect/config-entries/proxy-defaults) - controls
- proxy configuration
-
-- [Sameness Group](/consul/docs/connect/config-entries/sameness-group) - defines partitions and cluster peers with identical services
-
-- [Service Defaults](/consul/docs/connect/config-entries/service-defaults) - configures
- defaults for all the instances of a given service
-
-- [Service Intentions](/consul/docs/connect/config-entries/service-intentions) - defines
- the [intentions](/consul/docs/connect/intentions) for a destination service
-
-- [Service Resolver](/consul/docs/connect/config-entries/service-resolver) - matches
-  service instances with specific Connect upstream discovery requests
-
-- [Service Router](/consul/docs/connect/config-entries/service-router) - defines
- where to send layer 7 traffic based on the HTTP route
-
-- [Service Splitter](/consul/docs/connect/config-entries/service-splitter) - defines
- how to divide requests for a single HTTP route based on percentages
-
-- [Terminating Gateway](/consul/docs/connect/config-entries/terminating-gateway) - defines the
- services associated with terminating gateway
-
-## Managing Configuration Entries
-
-See [Agent - Config Entries](/consul/docs/agent/config-entries).
-
-## Using Configuration Entries For Service Defaults
-
-Outside of Kubernetes, when the agent is
-[configured](/consul/docs/agent/config/config-files#enable_central_service_config) to enable
-central service configurations, it will look for service configuration defaults
-that match a registering service instance. If it finds any, the agent will merge
-those defaults with the service instance configuration. This allows for things
-like service protocol or proxy configuration to be defined globally and
-inherited by any affected service registrations.
diff --git a/website/content/docs/connect/config-entries/ingress-gateway.mdx b/website/content/docs/connect/config-entries/ingress-gateway.mdx
deleted file mode 100644
index cd8eaf326f1e..000000000000
--- a/website/content/docs/connect/config-entries/ingress-gateway.mdx
+++ /dev/null
@@ -1,1876 +0,0 @@
----
-layout: docs
-page_title: Ingress gateway configuration reference
-description: >-
-  The ingress gateway configuration entry kind defines behavior for securing incoming communication between the service mesh and external sources. Learn about `"ingress-gateway"` config entry parameters for exposing TCP and HTTP listeners.
----
-
-# Ingress gateway configuration reference
-
-
-
-Ingress gateway is deprecated and will not be enhanced beyond its current capabilities. Ingress gateway is fully supported in this version but will be removed in a future release of Consul.
-
-Consul's API gateway is the recommended alternative to ingress gateway.
-
-
-
-This topic provides configuration reference information for the ingress gateway configuration entry. An ingress gateway is a type of proxy you register as a service in Consul to enable network connectivity from external services to services inside of the service mesh. Refer to [Ingress gateways overview](/consul/docs/connect/gateways/ingress-gateway) for additional information.
-
-## Configuration model
-
-The following list describes the configuration hierarchy, language-specific data types, default values if applicable, and requirements for the configuration entry. Click on a property name to view additional details.
-
-
-
-
-- [`Kind`](#kind): string | must be `ingress-gateway` | required
-- [`Name`](#name): string | required
-- [`Namespace`](#namespace): string | `default` |
-- [`Meta`](#meta): map of strings
-- [`Partition`](#partition): string | `default` |
-- [`TLS`](#tls): map
- - [`Enabled`](#tls-enabled): boolean | `false`
- - [`TLSMinVersion`](#tls-tlsminversion): string | `TLSv1_2`
- - [`TLSMaxVersion`](#tls-tlsmaxversion): string
- - [`CipherSuites`](#tls-ciphersuites): list of strings
- - [`SDS`](#tls-sds): map of strings
- - [`ClusterName`](#tls-sds): string
- - [`CertResource`](#tls-sds): string
-- [`Defaults`](#defaults): map
- - [`MaxConnections`](#defaults-maxconnections): number
- - [`MaxPendingRequests`](#defaults-maxpendingrequests): number
- - [`MaxConcurrentRequests`](#defaults-maxconcurrentrequests): number
- - [`PassiveHealthCheck`](#defaults-passivehealthcheck): map
- - [`Interval`](#defaults-passivehealthcheck): number
- - [`MaxFailures`](#defaults-passivehealthcheck): number
- - [`EnforcingConsecutive5xx`](#defaults-passivehealthcheck): number
- - [`MaxEjectionPercent`](#defaults-passivehealthcheck): number
- - [`BaseEjectionTime`](#defaults-passivehealthcheck): string
-- [`Listeners`](#listeners): list of maps
- - [`Port`](#listeners-port): number | `0`
-  - [`Protocol`](#listeners-protocol): string | `tcp`
- - [`Services`](#listeners-services): list of objects
- - [`Name`](#listeners-services-name): string
- - [`Namespace`](#listeners-services-namespace): string |
- - [`Partition`](#listeners-services-partition): string |
- - [`Hosts`](#listeners-services-hosts): List of strings | `.ingress.*`
- - [`RequestHeaders`](#listeners-services-requestheaders): map
- - [`Add`](#listeners-services-requestheaders): map of strings
- - [`Set`](#listeners-services-requestheaders): map of strings
- - [`Remove`](#listeners-services-requestheaders): list of strings
- - [`ResponseHeaders`](#listeners-services-responseheaders): map
- - [`Add`](#listeners-services-responseheaders): map of strings
- - [`Set`](#listeners-services-responseheaders): map of strings
- - [`Remove`](#listeners-services-responseheaders): list of strings
- - [`TLS`](#listeners-services-tls): map
- - [`SDS`](#listeners-services-tls-sds): map of strings
- - [`ClusterName`](#listeners-services-tls-sds): string
- - [`CertResource`](#listeners-services-tls-sds): string
- - [`MaxConnections`](#listeners-services-maxconnections): number | `0`
- - [`MaxPendingRequests`](#listeners-services-maxconnections): number | `0`
- - [`MaxConcurrentRequests`](#listeners-services-maxconnections): number | `0`
- - [`PassiveHealthCheck`](#listeners-services-passivehealthcheck): map
- - [`Interval`](#listeners-services-passivehealthcheck): number
- - [`MaxFailures`](#listeners-services-passivehealthcheck): number
- - [`EnforcingConsecutive5xx`](#listeners-services-passivehealthcheck): number
- - [`MaxEjectionPercent`](#listeners-services-passivehealthcheck): number
- - [`BaseEjectionTime`](#listeners-services-passivehealthcheck): string
- - [`TLS`](#listeners-tls): map
- - [`Enabled`](#listeners-tls-enabled): boolean | `false`
- - [`TLSMinVersion`](#listeners-tls-tlsminversion): string | `TLSv1_2`
- - [`TLSMaxVersion`](#listeners-tls-tlsmaxversion): string
- - [`CipherSuites`](#listeners-tls-ciphersuites): list of strings
- - [`SDS`](#listeners-tls-sds): map of strings
- - [`ClusterName`](#listeners-tls-sds): string
- - [`CertResource`](#listeners-tls-sds): string
-
-
-
-
-
-- [ `apiVersion`](#apiversion): string | must be set to `consul.hashicorp.com/v1alpha1` | required
-- [`kind`](#kind): string | must be `IngressGateway` | required
-- [`metadata`](#metadata): map of strings
- - [`name`](#metadata-name): string | required
- - [`namespace`](#metadata-namespace): string | `default` |
-- [`spec`](#spec): map
- - [`tls`](#spec-tls): map
- - [`enabled`](#spec-tls-enabled): boolean | `false`
- - [`tlsMinVersion`](#spec-tls-tlsminversion): string | `TLSv1_2`
- - [`tlsMaxVersion`](#spec-tls-tlsmaxversion): string
- - [`cipherSuites`](#spec-tls-ciphersuites): list of strings
- - [`sds`](#spec-tls-sds): map of strings
- - [`clusterName`](#spec-tls-sds): string
- - [`certResource`](#spec-tls-sds): string
- - [`defaults`](#spec-defaults): map
- - [`maxConnections`](#spec-defaults-maxconnections): number
- - [`maxPendingRequests`](#spec-defaults-maxpendingrequests): number
- - [`maxConcurrentRequests`](#spec-defaults-maxconcurrentrequests): number
- - [`passiveHealthCheck`](#spec-defaults-passivehealthcheck): map
- - [`interval`](#spec-defaults-passivehealthcheck): string
- - [`maxFailures`](#spec-defaults-passivehealthcheck): integer
- - [`enforcingConsecutive5xx`](#spec-defaults-passivehealthcheck): number
- - [`maxEjectionPercent`](#spec-defaults-passivehealthcheck): number
- - [`baseEjectionTime`](#spec-defaults-passivehealthcheck): string
- - [`listeners`](#spec-listeners): list of maps
- - [`port`](#spec-listeners-port): number | `0`
-    - [`protocol`](#spec-listeners-protocol): string | `tcp`
- - [`services`](#spec-listeners-services): list of maps
- - [`name`](#spec-listeners-services-name): string
- - [`namespace`](#spec-listeners-services-namespace): string | current namespace |
- - [`partition`](#spec-listeners-services-partition): string | current partition |
- - [`hosts`](#spec-listeners-services-hosts): list of strings | `.ingress.*`
- - [`requestHeaders`](#spec-listeners-services-requestheaders): map
- - [`add`](#spec-listeners-services-requestheaders): map of strings
- - [`set`](#spec-listeners-services-requestheaders): map of strings
- - [`remove`](#spec-listeners-services-requestheaders): list of strings
- - [`responseHeaders`](#spec-listeners-services-responseheaders): map
- - [`add`](#spec-listeners-services-responseheaders): map of strings
- - [`set`](#spec-listeners-services-responseheaders): map of strings
- - [`remove`](#spec-listeners-services-responseheaders): list of strings
- - [`tls`](#spec-listeners-services-tls): map
- - [`sds`](#spec-listeners-services-tls-sds): map of strings
- - [`clusterName`](#spec-listeners-services-tls-sds): string
- - [`certResource`](#spec-listeners-services-tls-sds): string
- - [`maxConnections`](#spec-listeners-services-maxconnections): number | `0`
- - [`maxPendingRequests`](#spec-listeners-services-maxconnections): number | `0`
- - [`maxConcurrentRequests`](#spec-listeners-services-maxconnections): number | `0`
- - [`passiveHealthCheck`](#spec-listeners-services-passivehealthcheck): map
- - [`interval`](#spec-listeners-services-passivehealthcheck): string
- - [`maxFailures`](#spec-listeners-services-passivehealthcheck): number
- - [`enforcingConsecutive5xx`](#spec-listeners-services-passivehealthcheck): number
- - [`maxEjectionPercent`](#spec-listeners-services-passivehealthcheck): integer
- - [`baseEjectionTime`](#spec-listeners-services-passivehealthcheck): string
- - [`tls`](#spec-listeners-tls): map
- - [`enabled`](#spec-listeners-tls-enabled): boolean | `false`
- - [`tlsMinVersion`](#spec-listeners-tls-tlsminversion): string | `TLSv1_2`
- - [`tlsMaxVersion`](#spec-listeners-tls-tlsmaxversion): string
- - [`cipherSuites`](#spec-listeners-tls-ciphersuites): list of strings
- - [`sds`](#spec-listeners-tls-sds): map of strings
- - [`clusterName`](#spec-listeners-tls-sds): string
- - [`certResource`](#spec-listeners-tls-sds): string
-
-
-
-
-
-## Complete configuration
-
-When every field is defined, an ingress gateway configuration entry has the following form:
-
-
-
-
-
-```hcl
-Kind = "ingress-gateway"
-Name = ""
-Namespace = ""
-Partition = ""
-Meta = {
- = ""
-}
-TLS = {
- Enabled = false
- TLSMinVersion = "TLSv1_2"
- TLSMaxVersion = ""
- CipherSuites = [
- ""
- ]
- SDS = {
- ClusterName = ""
- CertResource = ""
- }
-}
-Defaults = {
- MaxConnections =
- MaxPendingRequests =
- MaxConcurrentRequests =
- PassiveHealthCheck = {
- Interval = ""
- MaxFailures =
- EnforcingConsecutive5xx =
- MaxEjectionPercent =
- BaseEjectionTime = ""
- }
-}
-Listeners = [
- {
- Port = 0
- Protocol = "tcp"
- Services = [
- {
- Name = ""
- Namespace = ""
- Partition = ""
- Hosts = [
- ".ingress.*"
- ]
- RequestHeaders = {
- Add = {
- RequestHeaderName = ""
- }
- Set = {
- RequestHeaderName = ""
- }
- Remove = [
- ""
- ]
- }
- ResponseHeaders = {
- Add = {
- ResponseHeaderName = ""
- }
- Set = {
- ResponseHeaderName = ""
- }
- Remove = [
- ""
- ]
- }
- TLS = {
- SDS = {
- ClusterName = ""
- CertResource = ""
- }
- }
- MaxConnections =
- MaxPendingRequests =
- MaxConcurrentRequests =
- PassiveHealthCheck = {
- Interval = ""
- MaxFailures =
- EnforcingConsecutive5xx =
- MaxEjectionPercent =
- BaseEjectionTime = ""
- }
- }]
- TLS = {
- Enabled = false
- TLSMinVersion = "TLSv1_2"
- TLSMaxVersion = ""
- CipherSuites = [
- ""
- ]
- SDS = {
- ClusterName = ""
- CertResource = ""
- }
- }
- }
-]
-```
-
-
-
-
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: IngressGateway
-metadata:
- name:
- namespace: ""
-spec:
- tls:
- enabled: false
-    tlsMinVersion: TLSv1_2
- tlsMaxVersion: ""
- cipherSuites:
- -
- sds:
- clusterName:
- certResource:
- defaults:
- maxConnections:
- maxPendingRequests:
- maxConcurrentRequests:
- passiveHealthCheck:
- interval: ""
- maxFailures:
- enforcingConsecutive5xx:
- maxEjectionPercent:
- baseEjectionTime: ""
- listeners:
- - port: 0
- protocol: tcp
- services:
- - name:
- namespace:
- partition:
- hosts:
- - .ingress.*
- requestHeaders:
- add:
- requestHeaderName:
- set:
- requestHeaderName:
- remove:
- -
- responseHeaders:
- add:
- responseHeaderName:
- set:
- responseHeaderName:
- remove:
- -
- tls:
- sds:
- clusterName:
- certResource:
- maxConnections:
- maxPendingRequests:
- maxConcurrentRequests:
- passiveHealthCheck:
- interval: ""
- maxFailures:
- enforcingConsecutive5xx:
- maxEjectionPercent:
- baseEjectionTime: ""
- tls:
- enabled: false
- tlsMinVersion: TLSv1_2
- tlsMaxVersion:
- cipherSuites:
- -
- sds:
- clusterName:
- certResource:
-```
-
-
-
-
-
-```json
-{
- "Kind" : "ingress-gateway",
- "Name" : "",
- "Namespace" : "",
- "Partition" : "",
- "Meta": {
- "" : ""
- },
- "TLS" : {
- "Enabled" : false,
- "TLSMinVersion" : "TLSv1_2",
- "TLSMaxVersion" : "",
- "CipherSuites" : [
- ""
- ],
- "SDS": {
- "ClusterName" : "",
- "CertResource" : ""
- }
- },
- "Defaults" : {
- "MaxConnections" : ,
- "MaxPendingRequests" : ,
- "MaxConcurrentRequests": ,
- "PassiveHealthCheck" : {
- "interval": "",
- "maxFailures": ,
- "enforcingConsecutive5xx": ,
- "maxEjectionPercent": ,
- "baseEjectionTime": ""
- }
- },
- "Listeners" : [
- {
- "Port" : 0,
- "Protocol" : "tcp",
- "Services" : [
- {
- "Name" : "",
- "Namespace" : "",
- "Partition" : "",
- "Hosts" : [
- ".ingress.*"
- ],
- "RequestHeaders" : {
- "Add" : {
- "RequestHeaderName" : ""
- },
- "Set" : {
- "RequestHeaderName" : ""
- },
- "Remove" : [
- ""
- ]
- },
- "ResponseHeaders" : {
- "Add" : {
- "ResponseHeaderName" : ""
- },
- "Set" : {
- "ResponseHeaderName" : ""
- },
- "Remove" : [
- ""
- ]
- },
- "TLS" : {
- "SDS" : {
- "ClusterName" : "",
- "CertResource" : ""
- }
- },
- "MaxConnections" : ,
- "MaxPendingRequests" : ,
- "MaxConcurrentRequests" : ,
- "PassiveHealthCheck" : {
- "interval": "",
- "maxFailures": ,
- "enforcingConsecutive5xx":,
- "maxEjectionPercent": ,
- "baseEjectionTime": ""
- }
- }
- ],
- "TLS" : {
- "Enabled" : false,
- "TLSMinVersion" : "TLSv1_2",
- "TLSMaxVersion" : "",
- "CipherSuites" : [
- ""
- ],
- "SDS" : {
- "ClusterName" : "",
- "CertResource" : ""
- }
- }
- }
- ]
-}
-```
-
-
-
-
-
-## Specification
-
-This section provides details about the fields you can configure in the ingress gateway configuration entry.
-
-
-
-
-
-### `Kind`
-
-Specifies the type of configuration entry. Must be set to `ingress-gateway`.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: String value that must be set to `ingress-gateway`.
-
-### `Name`
-
-Specifies a name for the gateway. The name is metadata that you can use to reference the configuration entry when performing Consul operations with the [`consul config` command](/consul/commands/config).
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: String
-
-### `Namespace`
-
-Specifies the namespace to apply the configuration entry in. Refer to [Namespaces](/consul/docs/enterprise/namespaces) for additional information about Consul namespaces.
-
-If unspecified, the ingress gateway is applied to the `default` namespace. You can override the namespace when using the [`/config` API endpoint](/consul/api-docs/config) to register the configuration entry by specifying the `ns` query parameter.
-
-#### Values
-
-- Default: `default`
-- Data type: String
-
-### `Partition`
-
-Specifies the admin partition that the ingress gateway applies to. The value must match the partition in which the gateway is registered. Refer to [Admin partitions](/consul/docs/enterprise/admin-partitions) for additional information.
-
-If unspecified, the ingress gateway is applied to the `default` partition. You can override the partition when using the [`/config` API endpoint](/consul/api-docs/config) to register the configuration entry by specifying the `partition` query parameter.
-
-#### Values
-
-- Default: `default`
-- Data type: String
-
-### `Meta`
-
-Defines an arbitrary set of key-value pairs to store in the Consul KV.
-
-#### Values
-
-- Default: None
-- Data type: Map of one or more key-value pairs.
- - keys: String
- - values: String, integer, or float
-
-### `TLS`
-
-Specifies the TLS configuration settings for the gateway.
-
-#### Values
-
-- Default: No default
-- Data type: Object that can contain the following fields:
- - [`Enabled`](#tls-enabled)
- - [`TLSMinVersion`](#tls-tlsminversion)
- - [`TLSMaxVersion`](#tls-tlsmaxversion)
- - [`CipherSuites`](#tls-ciphersuites)
- - [`SDS`](#tls-sds)
-
-### `TLS.Enabled`
-
-Enables and disables TLS for the configuration entry. Set to `true` to enable built-in TLS for every listener on the gateway. TLS is disabled by default.
-
-When enabled, Consul adds each host defined in every service's `Hosts` field to the gateway's x509 certificate as a DNS subject alternative name (SAN).
-
-#### Values
-
- - Default: `false`
- - Data type: boolean
-
-### `TLS.TLSMinVersion`
-
-Specifies the minimum TLS version supported for gateway listeners.
-
-#### Values
-
-- Default: Depends on the version of Envoy:
- - Envoy v1.22.0 and later: `TLSv1_2`
- - Older versions: `TLSv1_0`
-- Data type: String with one of the following values:
- - `TLS_AUTO`
- - `TLSv1_0`
- - `TLSv1_1`
- - `TLSv1_2`
- - `TLSv1_3`
-
-### `TLS.TLSMaxVersion`
-
-Specifies the maximum TLS version supported for gateway listeners.
-
-#### Values
-
-- Default: Depends on the version of Envoy:
- - Envoy v1.22.0 and later: `TLSv1_2`
- - Older versions: `TLSv1_0`
-- Data type: String with one of the following values:
- - `TLS_AUTO`
- - `TLSv1_0`
- - `TLSv1_1`
- - `TLSv1_2`
- - `TLSv1_3`
-
-### `TLS.CipherSuites[]`
-
-Specifies a list of cipher suites that gateway listeners support when negotiating connections using TLS 1.2 or older. If unspecified, Consul applies the default for the version of Envoy in use. Refer to the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#envoy-v3-api-field-extensions-transport-sockets-tls-v3-tlsparameters-cipher-suites) for details.
-
-#### Values
-
-- Default: None
-- Data type: List of string values. Refer to the [Consul repository](https://github.com/hashicorp/consul/blob/v1.11.2/types/tls.go#L154-L169) for a list of supported ciphers.
-
-### `TLS.SDS`
-
-Specifies parameters for loading the TLS certificates from an external SDS service. Refer to [Serve custom TLS certificates from an external service](/consul/docs/connect/gateways/ingress-gateway/tls-external-service) for additional information.
-
-Consul applies the SDS configuration specified in this field as defaults for all listeners defined in the gateway. You can override the SDS settings for per listener or per service defined in the listener. Refer to the following configurations for additional information:
-
-- [`Listeners.TLS.SDS`](#listeners-tls-sds): Configures SDS settings for all services in the listener.
-- [`Listeners.Services.TLS.SDS`](#listeners-services-tls-sds): Configures SDS settings for a specific service defined in the listener.
-
-#### Values
-
-- Default: None
-- Data type: Map containing the following fields:
- - `ClusterName`
- - `CertResource`
-
-The following table describes how to configure SDS parameters.
-
-| Parameter | Description | Data type |
-| --- | --- | --- |
-| `ClusterName` | Specifies the name of the SDS cluster where Consul should retrieve certificates. The cluster must be specified in the gateway's bootstrap configuration. | String |
-| `CertResource` | Specifies an SDS resource name. Consul requests the SDS resource name when fetching the certificate from the SDS service. When set, Consul serves the certificate to all listeners over TLS unless a listener-specific TLS configuration overrides the SDS configuration. | String |
-
-### `Defaults`
-
-Specifies default configurations for connecting upstream services.
-
-#### Values
-
-- Default: None
-- The data type is a map containing the following parameters:
-
- - [`MaxConnections`](#defaults-maxconnections)
- - [`MaxPendingRequests`](#defaults-maxpendingrequests)
- - [`MaxConcurrentRequests`](#defaults-maxconcurrentrequests)
-
-### `Defaults.MaxConnections`
-
-Specifies the maximum number of HTTP/1.1 connections a service instance is allowed to establish against the upstream.
-
-#### Values
-
-- Default value is `0`, which instructs Consul to use the proxy's configuration. For Envoy, the default is `1024`.
-- Data type: Integer
-
-### `Defaults.MaxPendingRequests`
-
-Specifies the maximum number of requests that are allowed to queue while waiting to establish a connection. Listeners must use an L7 protocol for this configuration to take effect. Refer to [`Listeners.Protocol`](#listeners-protocol).
-
-#### Values
-
-- Default value is `0`, which instructs Consul to use the proxy's configuration. For Envoy, the default is `1024`.
-- Data type: Integer
-
-### `Defaults.MaxConcurrentRequests`
-
-Specifies the maximum number of concurrent HTTP/2 traffic requests that are allowed at a single point in time. Listeners must use an L7 protocol for this configuration to take effect. Refer to [`Listeners.Protocol`](#listeners-protocol).
-
-#### Values
-
-- Default value is `0`, which instructs Consul to use the proxy's configuration. For Envoy, the default is `1024`.
-- Data type: Integer
-
-### `Defaults.PassiveHealthCheck`
-
-Defines a passive health check configuration. Passive health checks remove hosts from the upstream cluster when they are unreachable or return errors.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-The following table describes the configurations for passive health checks:
-
-| Parameter | Description | Data type | Default |
-| --- | --- | --- | --- |
- | `Interval` | Specifies the time between checks. | string | `0s` |
- | `MaxFailures` | Specifies the number of consecutive failures allowed per check interval. If exceeded, Consul removes the host from the load balancer. | integer | `0` |
- | `EnforcingConsecutive5xx` | Specifies a percentage that indicates how many times out of 100 that Consul ejects the host when it detects an outlier status. The outlier status is determined by consecutive errors in the 500-599 response range. | integer | `100` |
- | `MaxEjectionPercent` | Specifies the maximum percentage of an upstream cluster that Consul ejects when the proxy reports an outlier. Consul ejects at least one host when an outlier is detected regardless of the value. | integer | `10` |
- | `BaseEjectionTime` | Specifies the minimum amount of time that an ejected host must remain outside the cluster before rejoining. The real time is equal to the value of the `BaseEjectionTime` multiplied by the number of times the host has been ejected. | string | `30s` |
-
-### `Listeners[]`
-
-Specifies a list of listeners in the mesh for the gateway. Listeners are uniquely identified by their port number.
-
-#### Values
-
-- Default: None
-- Data type: List of maps containing the following fields:
- - [`Port`](#listeners-port)
- - [`Protocol`](#listeners-protocol)
- - [`Services[]`](#listeners-services)
- - [`TLS`](#listeners-tls)
-
-### `Listeners[].Port`
-
-Specifies the port that the listener receives traffic on. The port is bound to the IP address specified in the [`-address`](/consul/commands/connect/envoy#address) flag when starting the gateway. The listener port must not conflict with the health check port.
-
-#### Values
-
-- Default: `0`
-- Data type: Integer
-
-### `Listeners[].Protocol`
-
-Specifies the protocol associated with the listener. To enable L7 network management capabilities, specify one of the following values:
-
-- `http`
-- `http2`
-- `grpc`
-
-#### Values
-
-- Default: `tcp`
-- Data type: String that contains one of the following values:
-
- - `tcp`
- - `http`
- - `http2`
- - `grpc`
-
-### `Listeners[].Services[]`
-
-Specifies a list of services that the listener exposes to services outside the mesh. Each service must have a unique name. The `Namespace` field is required for Consul Enterprise datacenters. If the [`Listeners.Protocol`](#listeners-protocol) field is set to `tcp`, then Consul can only expose one service. You can expose multiple services if the listener uses any other supported protocol.
-
-#### Values
-
-- Default: None
-- Data type: List of maps that can contain the following fields:
- - [`Name`](#listeners-services-name)
- - [`Namespace`](#listeners-services-namespace)
- - [`Partition`](#listeners-services-partition)
- - [`Hosts`](#listeners-services-hosts)
- - [`RequestHeaders`](#listeners-services-requestheaders)
-  - [`ResponseHeaders`](#listeners-services-responseheaders)
- - [`TLS`](#listeners-services-tls)
- - [`MaxConnections`](#listeners-services-maxconnections)
- - [`MaxPendingRequests`](#listeners-services-maxpendingrequests)
- - [`MaxConcurrentRequests`](#listeners-services-maxconcurrentrequests)
- - [`PassiveHealthCheck`](#listeners-services-passivehealthcheck)
-
-### `Listeners[].Services[].Name`
-
-Specifies the name of a service to expose to the listener. You can specify services in the following ways:
-
-- Provide the name of a service registered in the Consul catalog.
-- Provide the name of a service defined in other configuration entries. Refer to [Service Mesh Traffic Management Overview](/consul/docs/connect/manage-traffic) for additional information.
-- Provide a `*` wildcard to expose all services in the datacenter. Wild cards are not supported for listeners configured for TCP. Refer to [`Listeners[].Protocol`](#listeners-protocol) for additional information.
-
-#### Values
-
-- Default: None
-- Data type: String
-
-### `Listeners[].Services[].Namespace`
-
-Specifies the namespace to use when resolving the location of the service.
-
-#### Values
-
-- Default: Current namespace
-- Data type: String
-
-### `Listeners[].Services[].Partition`
-
-Specifies the admin partition to use when resolving the location of the service.
-
-#### Values
-
-- Default: Current partition
-- Data type: String
-
-### `Listeners[].Services[].Hosts[]`
-
-Specifies one or more hosts that the listening services can receive requests on. The ingress gateway proxies external traffic to the specified services when external requests include `host` headers that match a host specified in this field.
-
-If unspecified, Consul matches requests to services using the `.ingress.*` domain. You cannot specify a host for listeners that communicate over TCP. You cannot specify a host when service names are specified with a `*` wildcard. Requests must include the correct host for Consul to proxy traffic to the service.
-
-When TLS is disabled, you can use the `*` wildcard to match all hosts. Disabling TLS may be suitable for testing and learning purposes, but we recommend enabling TLS in production environments.
-
-You can use the wildcard in the left-most DNS label to match a set of hosts. For example, `*.example.com` is valid, but `example.*` and `*-suffix.example.com` are invalid.
-
-#### Values
-
-- Default: None
-- Data type: List of strings or `*`
-
-### `Listeners[].Services[].RequestHeaders`
-
-Specifies a set of HTTP-specific header modification rules applied to requests routed through the gateway. You cannot configure request headers if the listener protocol is set to `tcp`. Refer to [HTTP listener with Path-based Routing](#http-listener-with-path-based-routing) for an example configuration.
-
-#### Values
-
-- Default: None
-- Data type: Object containing one or more fields that define header modification rules:
-
- - `Add`: Map of one or more key-value pairs
- - `Set`: Map of one or more key-value pairs
- - `Remove`: Map of one or more key-value pairs
-
-The following table describes how to configure values for request headers:
-
-| Rule | Description | Data type |
-| --- | --- | --- |
-| `Add` | Defines a set of key-value pairs to add to the header. Use header names as the keys. Header names are not case-sensitive. If header values with the same name already exist, the value is appended and Consul applies both headers. You can [use variable placeholders](#use-variable-placeholders). | Map of strings |
-| `Set` | Defines a set of key-value pairs to add to the request header or to replace existing header values with. Use header names as the keys. Header names are not case-sensitive. If header values with the same names already exist, Consul replaces the header values. You can [use variable placeholders](#use-variable-placeholders). | Map of strings |
-| `Remove` | Defines a list of headers to remove. Consul removes only headers containing exact matches. Header names are not case-sensitive. | List of strings |
-
-##### Use variable placeholders
-
-For `Add` and `Set`, if the service is configured to use Envoy as the proxy, the value may contain variables to interpolate dynamic metadata into the value. For example, using the variable `%DOWNSTREAM_REMOTE_ADDRESS%` in your configuration entry allows you to pass a value that is generated at runtime.
-
-### `Listeners[].Services[].ResponseHeaders`
-
-Specifies a set of HTTP-specific header modification rules applied to responses routed through the gateway. You cannot configure response headers if the listener protocol is set to `tcp`. Refer to [HTTP listener with Path-based Routing](#http-listener-with-path-based-routing) for an example configuration.
-
-#### Values
-
-- Default: None
-- Data type: Map containing one or more fields that define header modification rules:
-
- - `Add`: Map of one or more key-value pairs
- - `Set`: Map of one or more key-value pairs
- - `Remove`: Map of one or more key-value pairs
-
-The following table describes how to configure values for request headers:
-
-| Rule | Description | Data type |
-| --- | --- | --- |
-| `Add` | Defines a set of key-value pairs to add to the header. Use header names as the keys. Header names are not case-sensitive. If header values with the same name already exist, the value is appended and Consul applies both headers. You can [use variable placeholders](#use-variable-placeholders). | Map of strings |
-| `Set` | Defines a set of key-value pairs to add to the response header or to replace existing header values with. Use header names as the keys. Header names are not case-sensitive. If header values with the same names already exist, Consul replaces the header values. You can [use variable placeholders](#use-variable-placeholders). | Map of strings |
-| `Remove` | Defines a list of headers to remove. Consul removes only headers containing exact matches. Header names are not case-sensitive. | List of strings |
-
-##### Use variable placeholders
-
-For `Add` and `Set`, if the service is configured to use Envoy as the proxy, the value may contain variables to interpolate dynamic metadata into the value. For example, using the variable `%DOWNSTREAM_REMOTE_ADDRESS%` in your configuration entry allows you to pass a value that is generated at runtime.
-
-### `Listeners[].Services[].TLS`
-
-Specifies a TLS configuration for a specific service. The settings in this configuration override the main [`TLS`](#tls) settings for the configuration entry.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-### `Listeners[].Services[].TLS.SDS`
-
-Specifies parameters that configure the listener to load TLS certificates from an external SDS. Refer to [Serve custom TLS certificates from an external service](/consul/docs/connect/gateways/ingress-gateway/tls-external-service) for additional information.
-
-This configuration overrides the main [`TLS.SDS`](#tls-sds) settings for the configuration entry. If unspecified, Consul applies the top-level [`TLS.SDS`](#tls-sds) settings.
-
-#### Values
-
-- Default: None
-- Data type: Map containing the following fields:
-
- - `ClusterName`
- - `CertResource`
-
-The following table describes how to configure SDS parameters. Refer to [Configure static SDS clusters](/consul/docs/connect/gateways/ingress-gateway/tls-external-service#configure-static-sds-clusters) for usage information:
-
-| Parameter | Description | Data type |
-| --- | --- | --- |
-| `ClusterName` | Specifies the name of the SDS cluster where Consul should retrieve certificates. The cluster must be specified in the gateway's bootstrap configuration. | String |
-| `CertResource` | Specifies an SDS resource name. Consul requests the SDS resource name when fetching the certificate from the SDS service. When set, Consul serves the certificate to all listeners over TLS unless a listener-specific TLS configuration overrides the SDS configuration. | String |
-
-### `Listeners[].Services[].MaxConnections`
-
-Specifies the maximum number of HTTP/1.1 connections a service instance is allowed to establish against the upstream.
-
-When defined, this field overrides the [`Defaults.MaxConnections`](#defaults-maxconnections) configuration.
-
-#### Values
-
-- Default: None
-- Data type: Integer
-
-### `Listeners[].Services[].MaxPendingRequests`
-
-Specifies the maximum number of requests that are allowed to queue while waiting to establish a connection. When defined, this field overrides the value specified in the [`Defaults.MaxPendingRequests`](#defaults-maxpendingrequests) field of the configuration entry.
-
-Listeners must use an L7 protocol for this configuration to take effect. Refer to [`Listeners.Protocol`](#listeners-protocol) for more information.
-
-#### Values
-
-- Default: None
-- Data type: Integer
-
-### `Listeners[].Services[].MaxConcurrentRequests`
-
-Specifies the maximum number of concurrent HTTP/2 traffic requests that the service is allowed at a single point in time. This field overrides the value set in the [`Defaults.MaxConcurrentRequests`](#defaults-maxconcurrentrequests) field of the configuration entry.
-
-Listeners must use an L7 protocol for this configuration to take effect. Refer to [`Listeners.Protocol`](#listeners-protocol) for more information.
-
-#### Values
-
-- Default: None
-- Data type: Integer
-
-### `Listeners[].Services[].PassiveHealthCheck`
-
-Defines a passive health check configuration for the service. Passive health checks remove hosts from the upstream cluster when the service is unreachable or returns errors. This field overrides the value set in the [`Defaults.PassiveHealthCheck`](#defaults-passivehealthcheck) field of the configuration entry.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-The following table describes the configurations for passive health checks:
-
-| Parameter | Description | Data type | Default |
-| --- | --- | --- | --- |
- | `Interval` | Specifies the time between checks. | string | `0s` |
- | `MaxFailures` | Specifies the number of consecutive failures allowed per check interval. If exceeded, Consul removes the host from the load balancer. | integer | `0` |
- | `EnforcingConsecutive5xx` | Specifies a percentage that indicates how many times out of 100 that Consul ejects the host when it detects an outlier status. The outlier status is determined by consecutive errors in the 500-599 response range. | integer | `100` |
- | `MaxEjectionPercent` | Specifies the maximum percentage of an upstream cluster that Consul ejects when the proxy reports an outlier. Consul ejects at least one host when an outlier is detected regardless of the value. | integer | `10` |
- | `BaseEjectionTime` | Specifies the minimum amount of time that an ejected host must remain outside the cluster before rejoining. The real time is equal to the value of the `BaseEjectionTime` multiplied by the number of times the host has been ejected. | string | `30s` |
-
-### `Listeners[].TLS`
-
-Specifies the TLS configuration for the listener. If unspecified, Consul applies any [service-specific TLS configurations](#listeners-services-tls). If neither the listener- nor service-specific TLS configurations are specified, Consul applies the main [`TLS`](#tls) settings for the configuration entry.
-
-#### Values
-
-- Default: None
-- Data type: Map that can contain the following fields:
- - [`Enabled`](#listeners-tls-enabled)
- - [`TLSMinVersion`](#listeners-tls-tlsminversion)
- - [`TLSMaxVersion`](#listeners-tls-tlsmaxversion)
- - [`CipherSuites`](#listeners-tls-ciphersuites)
- - [`SDS`](#listeners-tls-sds)
-
-### `Listeners[].TLS.Enabled`
-
-Set to `true` to enable built-in TLS for the listener. If enabled, Consul adds each host defined in every service's `Hosts` field to the gateway's x509 certificate as a DNS subject alternative name (SAN).
-
-#### Values
-
- - Default: `false`
- - Data type: boolean
-
-### `Listeners[].TLS.TLSMinVersion`
-
-Specifies the minimum TLS version supported for the listener.
-
-#### Values
-
-- Default: Depends on the version of Envoy:
- - Envoy v1.22.0 and later: `TLSv1_2`
- - Older versions: `TLSv1_0`
-- Data type: String with one of the following values:
- - `TLS_AUTO`
- - `TLSv1_0`
- - `TLSv1_1`
- - `TLSv1_2`
- - `TLSv1_3`
-
-### `Listeners[].TLS.TLSMaxVersion`
-
-Specifies the maximum TLS version supported for the listener.
-
-#### Values
-
-- Default: Depends on the version of Envoy:
- - Envoy v1.22.0 and later: `TLSv1_2`
- - Older versions: `TLSv1_0`
-- Data type: String with one of the following values:
- - `TLS_AUTO`
- - `TLSv1_0`
- - `TLSv1_1`
- - `TLSv1_2`
- - `TLSv1_3`
-
-### `Listeners[].TLS.CipherSuites`
-
-Specifies a list of cipher suites that the listener supports when negotiating connections using TLS 1.2 or older. If unspecified, Consul applies the default for the version of Envoy in use. Refer to the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#envoy-v3-api-field-extensions-transport-sockets-tls-v3-tlsparameters-cipher-suites) for details.
-
-#### Values
-
-- Default: None
-- Data type: List of string values. Refer to the [Consul repository](https://github.com/hashicorp/consul/blob/v1.11.2/types/tls.go#L154-L169) for a list of supported ciphers.
-
-### `Listeners[].TLS.SDS`
-
-Specifies parameters for loading the TLS certificates from an external SDS service. Refer to [Serve custom TLS certificates from an external service](/consul/docs/connect/gateways/ingress-gateway/tls-external-service) for additional information.
-
-Consul applies the SDS configuration specified in this field to all services in the listener. You can override the `Listeners.TLS.SDS` configuration per service by configuring the [`Listeners.Services.TLS.SDS`](#listeners-services-tls-sds) settings for each service.
-
-#### Values
-
-- Default: None
-- The data type is a map containing `ClusterName` and `CertResource` fields.
-
-The following table describes how to configure SDS parameters. Refer to [Configure static SDS clusters](/consul/docs/connect/gateways/ingress-gateway/tls-external-service#configure-static-sds-clusters) for usage information:
-
-| Parameter | Description | Data type |
-| --- | --- | --- |
-| `ClusterName` | Specifies the name of the SDS cluster where Consul should retrieve certificates. The cluster must be specified in the gateway's bootstrap configuration. | String |
-| `CertResource` | Specifies an SDS resource name. Consul requests the SDS resource name when fetching the certificate from the SDS service. When set, Consul serves the certificate to all listeners over TLS unless a listener-specific TLS configuration overrides the SDS configuration. | String |
-
-
-
-
-
-### `apiVersion`
-
-Kubernetes-only parameter that specifies the version of the Consul API that the configuration entry maps to Kubernetes configurations. The value must be `consul.hashicorp.com/v1alpha1`.
-
-### `kind`
-
-Specifies the type of configuration entry to implement. Must be set to `IngressGateway`.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: String value that must be set to `IngressGateway`.
-
-### `metadata`
-
-Specifies metadata for the gateway.
-
-#### Values
-
-- Default: None
-- This field is required
-- Data type: Map that contains the following fields:
- - [`name`](#metadata-name)
- - [`namespace`](#metadata-namespace)
-
-### `metadata.name`
-
-Specifies a name for the gateway. The name is metadata that you can use to reference the configuration entry when performing Consul operations with the [`consul config` command](/consul/commands/config).
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: String
-
-### `metadata.namespace`
-
-Specifies the namespace to apply the configuration entry in. Refer to [Namespaces](/consul/docs/enterprise/namespaces) for additional information about Consul namespaces.
-
-If unspecified, the ingress gateway is applied to the `default` namespace. You can override the namespace when using the [`/config` API endpoint](/consul/api-docs/config) to register the configuration entry by specifying the `ns` query parameter.
-
-#### Values
-
-- Default: `default`
-- Data type: String
-
-### `spec`
-
-Kubernetes-only field that contains all of the configurations for ingress gateway pods.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: Map containing the following fields:
- - [`tls`](#tls)
- - [`defaults`](#defaults)
- - [`listeners`](#listeners)
-
-### `spec.tls`
-
-Specifies the TLS configuration settings for the gateway.
-
-#### Values
-
-- Default: No default
-- Data type: Object that can contain the following fields:
- - [`enabled`](#tls-enabled)
- - [`tlsMinVersion`](#spec-tls-tlsminversion)
- - [`tlsMaxVersion`](#spec-tls-tlsmaxversion)
- - [`cipherSuites`](#spec-tls-tlsciphersuites)
- - [`sds`](#spec-tls-sds)
-
-### `spec.tls.enabled`
-
-Enables and disables TLS for the configuration entry. Set to `true` to enable built-in TLS for every listener on the gateway. TLS is disabled by default.
-
-When enabled, Consul adds each host defined in every service's `Hosts` field to the gateway's x509 certificate as a DNS subject alternative name (SAN).
-
-#### Values
-
- - Default: `false`
- - Data type: boolean
-
-### `spec.tls.tlsMinVersion`
-
-Specifies the minimum TLS version supported for gateway listeners.
-
-#### Values
-
-- Default: Depends on the version of Envoy:
- - Envoy v1.22.0 and later: `TLSv1_2`
- - Older versions: `TLSv1_0`
-- Data type: String with one of the following values:
- - `TLS_AUTO`
- - `TLSv1_0`
- - `TLSv1_1`
- - `TLSv1_2`
- - `TLSv1_3`
-
-### `spec.tls.tlsMaxVersion`
-
-Specifies the maximum TLS version supported for gateway listeners.
-
-#### Values
-
-- Default: Depends on the version of Envoy:
- - Envoy v1.22.0 and later: `TLSv1_2`
- - Older versions: `TLSv1_0`
-- Data type: String with one of the following values:
- - `TLS_AUTO`
- - `TLSv1_0`
- - `TLSv1_1`
- - `TLSv1_2`
- - `TLSv1_3`
-
-### `spec.tls.cipherSuites[]`
-
-Specifies a list of cipher suites that gateway listeners support when negotiating connections using TLS 1.2 or older. If unspecified, Consul applies the default for the version of Envoy in use. Refer to the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#envoy-v3-api-field-extensions-transport-sockets-tls-v3-tlsparameters-cipher-suites) for details.
-
-#### Values
-
-- Default: None
-- Data type: List of string values. Refer to the [Consul repository](https://github.com/hashicorp/consul/blob/v1.11.2/types/tls.go#L154-L169) for a list of supported ciphers.
-
-### `spec.tls.sds`
-
-Specifies parameters for loading the TLS certificates from an external SDS service. Refer to [Serve custom TLS certificates from an external service](/consul/docs/connect/gateways/ingress-gateway/tls-external-service) for additional information.
-
-Consul applies the SDS configuration specified in this field as defaults for all listeners defined in the gateway. You can override the SDS settings per listener or per service defined in the listener. Refer to the following configurations for additional information:
-
-- [`spec.listeners.tls.sds`](#spec-listeners-tls-sds): Configures SDS settings for all services in the listener.
-- [`spec.listeners.services.tls.sds`](#spec-listeners-services-tls-sds): Configures SDS settings for a specific service defined in the listener.
-
-#### Values
-
-- Default: None
-- Data type: Map containing the following fields:
-  - `clusterName`
-  - `certResource`
-
-The following table describes how to configure SDS parameters.
-
-| Parameter | Description | Data type |
-| --- | --- | --- |
-| `clusterName` | Specifies the name of the SDS cluster where Consul should retrieve certificates. The cluster must be specified in the gateway's bootstrap configuration. | String |
-| `certResource` | Specifies an SDS resource name. Consul requests the SDS resource name when fetching the certificate from the SDS service. When set, Consul serves the certificate to all listeners over TLS unless a listener-specific TLS configuration overrides the SDS configuration. | String |
-
-### `spec.defaults`
-
-Specifies default configurations for upstream connections.
-
-#### Values
-
-- Default: None
-- The data type is a map containing the following parameters:
-
- - [`maxConnections`](#spec-defaults-maxconnections)
- - [`maxPendingRequests`](#spec-defaults-maxpendingrequests)
- - [`maxConcurrentRequests`](#spec-defaults-maxconcurrentrequests)
-
-### `spec.defaults.maxConnections`
-
-Specifies the maximum number of HTTP/1.1 connections a service instance is allowed to establish against the upstream. If unspecified, Consul uses Envoy's configuration. The default configuration for Envoy is `1024`.
-
-#### Values
-
-- Default: `0`
-- Data type: Integer
-
-### `spec.defaults.maxPendingRequests`
-
-Specifies the maximum number of requests that are allowed to queue while waiting to establish a connection. Listeners must use an L7 protocol for this configuration to take effect. Refer to [`spec.listeners.Protocol`](#spec-listeners-protocol).
-
-If unspecified, Consul uses Envoy's configuration. The default for Envoy is `1024`.
-
-#### Values
-
-- Default: `0`
-- Data type: Integer
-
-### `spec.defaults.maxConcurrentRequests`
-
-Specifies the maximum number of concurrent HTTP/2 traffic requests that are allowed at a single point in time. Listeners must use an L7 protocol for this configuration to take effect. Refer to [`spec.listeners.protocol`](#spec-listeners-protocol).
-
-If unspecified, Consul uses Envoy's configuration. The default for Envoy is `1024`.
-
-#### Values
-
-- Default: `0`
-- Data type: Integer
-
-### `spec.defaults.passiveHealthCheck`
-
-Defines a passive health check configuration. Passive health checks remove hosts from the upstream cluster when they are unreachable or return errors.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-The following table describes the configurations for passive health checks:
-
-| Parameter | Description | Data type | Default |
-| --- | --- | --- | --- |
- | `Interval` | Specifies the time between checks. | string | `0s` |
- | `MaxFailures` | Specifies the number of consecutive failures allowed per check interval. If exceeded, Consul removes the host from the load balancer. | integer | `0` |
- | `EnforcingConsecutive5xx` | Specifies a percentage that indicates how many times out of 100 that Consul ejects the host when it detects an outlier status. The outlier status is determined by consecutive errors in the 500-599 response range. | integer | `100` |
- | `MaxEjectionPercent` | Specifies the maximum percentage of an upstream cluster that Consul ejects when the proxy reports an outlier. Consul ejects at least one host when an outlier is detected regardless of the value. | integer | `10` |
- | `BaseEjectionTime` | Specifies the minimum amount of time that an ejected host must remain outside the cluster before rejoining. The real time is equal to the value of the `BaseEjectionTime` multiplied by the number of times the host has been ejected. | string | `30s` |
-
-### `spec.listeners[]`
-
-Specifies a list of listeners in the mesh for the gateway. Listeners are uniquely identified by their port number.
-
-#### Values
-
-- Default: None
-- Data type: List of maps containing the following fields:
- - [`port`](#spec-listeners-port)
- - [`protocol`](#spec-listeners-protocol)
- - [`services[]`](#spec-listeners-services)
- - [`tls`](#spec-listeners-tls)
-
-### `spec.listeners[].port`
-
-Specifies the port that the listener receives traffic on. The port is bound to the IP address specified in the [`-address`](/consul/commands/connect/envoy#address) flag when starting the gateway. The listener port must not conflict with the health check port.
-
-#### Values
-
-- Default: `0`
-- Data type: Integer
-
-### `spec.listeners[].protocol`
-
-Specifies the protocol associated with the listener. To enable L7 network management capabilities, specify one of the following values:
-
-- `http`
-- `http2`
-- `grpc`
-
-#### Values
-
-- Default: `tcp`
-- Data type: String that contains one of the following values:
-
- - `tcp`
- - `http`
- - `http2`
- - `grpc`
-
-### `spec.listeners[].services[]`
-
-Specifies a list of services that the listener exposes to services outside the mesh. Each service must have a unique name. The `namespace` field is required for Consul Enterprise datacenters. If the listener's [`protocol`](#spec-listeners-protocol) field is set to `tcp`, then Consul can only expose one service. You can expose multiple services if the listener uses any other supported protocol.
-
-#### Values
-
-- Default: None
-- Data type: List of maps that can contain the following fields:
- - [`name`](#spec-listeners-services-name)
- - [`namespace`](#spec-listeners-services-namespace)
- - [`partition`](#spec-listeners-services-partition)
- - [`hosts`](#spec-listeners-services-hosts)
- - [`requestHeaders`](#spec-listeners-services-requestheaders)
-  - [`responseHeaders`](#spec-listeners-services-responseheaders)
-  - [`tls`](#spec-listeners-services-tls)
- - [`maxConnections`](#spec-listeners-services-maxconnections)
- - [`maxPendingRequests`](#spec-listeners-services-maxpendingrequests)
- - [`maxConcurrentRequests`](#spec-listeners-services-maxconcurrentrequests)
- - [`passiveHealthCheck`](#spec-listeners-services-passivehealthcheck)
-
-### `spec.listeners[].services[].name`
-
-Specifies the name of a service to expose to the listener. You can specify services in the following ways:
-
-- Provide the name of a service registered in the Consul catalog.
-- Provide the name of a service defined in other configuration entries. Refer to [Service Mesh Traffic Management Overview](/consul/docs/connect/manage-traffic) for additional information. Refer to [HTTP listener with path-based routes](#http-listener-with-path-based-routes) for an example.
-- Provide a `*` wildcard to expose all services in the datacenter. Wild cards are not supported for listeners configured for TCP. Refer to [`spec.listeners.protocol`](#spec-listeners-protocol) for additional information.
-
-#### Values
-
-- Default: None
-- Data type: String
-
-### `spec.listeners[].services[].namespace`
-
-Specifies the namespace to use when resolving the location of the service.
-
-#### Values
-
-- Default: Current namespace
-- Data type: String
-
-### `spec.listeners[].services[].partition`
-
-Specifies the admin partition to use when resolving the location of the service.
-
-#### Values
-
-- Default: Current partition
-- Data type: String
-
-### `spec.listeners[].services[].hosts[]`
-
-Specifies one or more hosts that the listening services can receive requests on. The ingress gateway proxies external traffic to the specified services when external requests include `host` headers that match a host specified in this field.
-
-If unspecified, Consul matches requests to services using the `.ingress.*` domain. You cannot specify a host for listeners that communicate over TCP. You cannot specify a host when service names are specified with a `*` wildcard. Requests must include the correct host for Consul to proxy traffic to the service.
-
-When TLS is disabled, you can use the `*` wildcard to match all hosts. Disabling TLS may be suitable for testing and learning purposes, but we recommend enabling TLS in production environments.
-
-You can use the wildcard in the left-most DNS label to match a set of hosts. For example, `*.example.com` is valid, but `example.*` and `*-suffix.example.com` are invalid.
-
-#### Values
-
-- Default: None
-- Data type: List of strings or `*`
-
-### `spec.listeners[].services[].requestHeaders`
-
-Specifies a set of HTTP-specific header modification rules applied to requests routed through the gateway. You cannot configure request headers if the listener protocol is set to `tcp`. Refer to [HTTP listener with path-based routing](#http-listener-with-path-based-routing) for an example configuration.
-
-#### Values
-
-- Default: None
-- Data type: Object containing one or more fields that define header modification rules:
-
- - `add`: Map of one or more key-value pairs
- - `set`: Map of one or more key-value pairs
- - `remove`: List of one or more strings
-
-The following table describes how to configure values for request headers:
-
-| Rule | Description | Data type |
-| --- | --- | --- |
-| `add` | Defines a set of key-value pairs to add to the header. Use header names as the keys. Header names are not case-sensitive. If header values with the same name already exist, the value is appended and Consul applies both headers. You can [use variable placeholders](#use-variable-placeholders). | Map of strings |
-| `set` | Defines a set of key-value pairs to add to the request header or to replace existing header values with. Use header names as the keys. Header names are not case-sensitive. If header values with the same names already exist, Consul replaces the header values. You can [use variable placeholders](#use-variable-placeholders). | Map of strings |
-| `remove` | Defines a list of headers to remove. Consul removes only headers containing exact matches. Header names are not case-sensitive. | List of strings |
-
-##### Use variable placeholders
-
-For `add` and `set`, if the service is configured to use Envoy as the proxy, the value may contain variables to interpolate dynamic metadata into the value. For example, using the variable `%DOWNSTREAM_REMOTE_ADDRESS%` in your configuration entry allows you to pass a value that is generated at runtime.
-
-### `spec.listeners[].services[].responseHeaders`
-
-Specifies a set of HTTP-specific header modification rules applied to responses routed through the gateway. You cannot configure response headers if the listener protocol is set to `tcp`. Refer to [HTTP listener with path-based routing](#http-listener-with-path-based-routing) for an example configuration.
-
-#### Values
-
-- Default: None
-- Data type: Map containing one or more fields that define header modification rules:
-
- - `add`: Map of one or more key-value pairs
- - `set`: Map of one or more key-value pairs
- - `remove`: List of one or more strings
-
-The following table describes how to configure values for response headers:
-
-| Rule | Description | Data type |
-| --- | --- | --- |
-| `add` | Defines a set of key-value pairs to add to the header. Use header names as the keys. Header names are not case-sensitive. If header values with the same name already exist, the value is appended and Consul applies both headers. You can [use variable placeholders](#use-variable-placeholders). | Map of strings |
-| `set` | Defines a set of key-value pairs to add to the request header or to replace existing header values with. Use header names as the keys. Header names are not case-sensitive. If header values with the same names already exist, Consul replaces the header values. You can [use variable placeholders](#use-variable-placeholders). | Map of strings |
-| `remove` | Defines a list of headers to remove. Consul removes only headers containing exact matches. Header names are not case-sensitive. | List of strings |
-
-##### Use variable placeholders
-
-For `add` and `set`, if the service is configured to use Envoy as the proxy, the value may contain variables to interpolate dynamic metadata into the value. For example, using the variable `%DOWNSTREAM_REMOTE_ADDRESS%` in your configuration entry allows you to pass a value that is generated at runtime.
-
-### `spec.listeners[].services[].tls`
-
-Specifies a TLS configuration for a specific service. The settings in this configuration override the main [`tls`](#spec-tls) settings for the configuration entry.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-### `spec.listeners[].services[].tls.sds`
-
-Specifies parameters that configure the listener to load TLS certificates from an external SDS. Refer to [Serve custom TLS certificates from an external service](/consul/docs/connect/gateways/ingress-gateway/tls-external-service) for additional information.
-
-If unspecified, Consul applies the [`sds`](#spec-tls-sds) settings configured for the ingress gateway. If both are specified, this configuration overrides the settings for the configuration entry.
-
-#### Values
-
-- Default: None
-- Data type: Map containing the following fields:
-
- - `clusterName`
- - `certResource`
-
-The following table describes how to configure SDS parameters. Refer to [Serve custom TLS certificates from an external service](/consul/docs/connect/gateways/ingress-gateway/tls-external-service) for usage information:
-
-| Parameter | Description | Data type |
-| --- | --- | --- |
-| `clusterName` | Specifies the name of the SDS cluster where Consul should retrieve certificates. The cluster must be specified in the gateway's bootstrap configuration. | String |
-| `certResource` | Specifies an SDS resource name. Consul requests the SDS resource name when fetching the certificate from the SDS service. When set, Consul serves the certificate to all listeners over TLS unless a listener-specific TLS configuration overrides the SDS configuration. | String |
-
-### `spec.listeners[].services[].maxConnections`
-
-Specifies the maximum number of HTTP/1.1 connections a service instance is allowed to establish against the upstream.
-
-A value specified in this field overrides the [`maxConnections`](#spec-defaults-maxconnections) field specified in the `defaults` configuration.
-
-#### Values
-
-- Default: None
-- Data type: Integer
-
-### `spec.listeners[].services[].maxPendingRequests`
-
-Specifies the maximum number of requests that are allowed to queue while waiting to establish a connection. A value specified in this field overrides the [`maxPendingRequests`](#spec-defaults-maxpendingrequests) field specified in the `defaults` configuration.
-
-Listeners must use an L7 protocol for this configuration to take effect. Refer to [`spec.listeners.protocol`](#spec-listeners-protocol) for more information.
-
-#### Values
-
-- Default: None
-- Data type: Integer
-
-### `spec.listeners[].services[].maxConcurrentRequests`
-
-Specifies the maximum number of concurrent HTTP/2 traffic requests that the service is allowed at a single point in time. A value specified in this field overrides the [`maxConcurrentRequests`](#spec-defaults-maxconcurrentrequests) field specified in the `defaults` configuration entry.
-
-Listeners must use an L7 protocol for this configuration to take effect. Refer to [`spec.listeners.protocol`](#spec-listeners-protocol) for more information.
-
-#### Values
-
-- Default: None
-- Data type: Integer
-
-### `spec.listeners[].services[].passiveHealthCheck`
-
-Defines a passive health check configuration for the service. Passive health checks remove hosts from the upstream cluster when the service is unreachable or returns errors. Health checks specified for services override the health checks defined in the [`spec.defaults.passiveHealthCheck`](#spec-defaults-passivehealthcheck) configuration.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-The following table describes the configurations for passive health checks:
-
-| Parameter | Description | Data type | Default |
-| --- | --- | --- | --- |
- | `Interval` | Specifies the time between checks. | string | `0s` |
- | `MaxFailures` | Specifies the number of consecutive failures allowed per check interval. If exceeded, Consul removes the host from the load balancer. | integer | `0` |
- | `EnforcingConsecutive5xx` | Specifies a percentage that indicates how many times out of 100 that Consul ejects the host when it detects an outlier status. The outlier status is determined by consecutive errors in the 500-599 response range. | integer | `100` |
- | `MaxEjectionPercent` | Specifies the maximum percentage of an upstream cluster that Consul ejects when the proxy reports an outlier. Consul ejects at least one host when an outlier is detected regardless of the value. | integer | `10` |
- | `BaseEjectionTime` | Specifies the minimum amount of time that an ejected host must remain outside the cluster before rejoining. The real time is equal to the value of the `BaseEjectionTime` multiplied by the number of times the host has been ejected. | string | `30s` |
-
-### `spec.listeners[].tls`
-
-Specifies the TLS configuration for the listener. If unspecified, Consul applies any [service-specific TLS configurations](#spec-listeners-services-tls). If neither the listener- nor service-specific TLS configurations are specified, Consul applies the main [`tls`](#tls) settings for the configuration entry.
-
-#### Values
-
-- Default: None
-- Data type: Map that can contain the following fields:
- - [`enabled`](#spec-listeners-tls-enabled)
- - [`tlsMinVersion`](#spec-listeners-tls-tlsminversion)
- - [`tlsMaxVersion`](#spec-listeners-tls-tlsmaxversion)
- - [`cipherSuites`](#spec-listeners-tls-ciphersuites)
- - [`sds`](#spec-listeners-tls-sds)
-
-### `spec.listeners[].tls.enabled`
-
-Set to `true` to enable built-in TLS for the listener. If enabled, Consul adds each host defined in every service's `Hosts` field to the gateway's x509 certificate as a DNS subject alternative name (SAN).
-
-#### Values
-
- - Default: `false`
- - Data type: boolean
-
-### `spec.listeners[].tls.tlsMinVersion`
-
-Specifies the minimum TLS version supported for the listener.
-
-#### Values
-
-- Default: Depends on the version of Envoy:
- - Envoy v1.22.0 and later: `TLSv1_2`
- - Older versions: `TLSv1_0`
-- Data type: String with one of the following values:
- - `TLS_AUTO`
- - `TLSv1_0`
- - `TLSv1_1`
- - `TLSv1_2`
- - `TLSv1_3`
-
-### `spec.listeners[].tls.tlsMaxVersion`
-
-Specifies the maximum TLS version supported for the listener.
-
-#### Values
-
-- Default: Depends on the version of Envoy:
- - Envoy v1.22.0 and later: `TLSv1_2`
- - Older versions: `TLSv1_0`
-- Data type: String with one of the following values:
- - `TLS_AUTO`
- - `TLSv1_0`
- - `TLSv1_1`
- - `TLSv1_2`
- - `TLSv1_3`
-
-### `spec.listeners[].tls.cipherSuites`
-
-Specifies a list of cipher suites that the listener supports when negotiating connections using TLS 1.2 or older. If unspecified, Consul applies the default for the version of Envoy in use. Refer to the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#envoy-v3-api-field-extensions-transport-sockets-tls-v3-tlsparameters-cipher-suites) for details.
-
-#### Values
-
-- Default: None
-- Data type: List of string values. Refer to the [Consul repository](https://github.com/hashicorp/consul/blob/v1.11.2/types/tls.go#L154-L169) for a list of supported ciphers.
-
-### `spec.listeners[].tls.sds`
-
-Specifies parameters for loading the TLS certificates from an external SDS service. Refer to [Serve custom TLS certificates from an external service](/consul/docs/connect/gateways/ingress-gateway/tls-external-service) for additional information.
-
-Consul applies the SDS configuration specified in this field to all services in the listener. You can override the `spec.listeners[].tls.sds` configuration per service by configuring the [`spec.listeners.services.tls.sds`](#spec-listeners-services-tls-sds) settings for each service.
-
-#### Values
-
-- Default: None
-- Data type: Map containing the following fields
- - `clusterName`
- - `certResource`
-
-The following table describes how to configure SDS parameters. Refer to [Configure static SDS clusters](/consul/docs/connect/gateways/ingress-gateway/tls-external-service#configure-static-sds-clusters) for usage information:
-
-| Parameter | Description | Data type |
-| --- | --- | --- |
-| `clusterName` | Specifies the name of the SDS cluster where Consul should retrieve certificates. The cluster must be specified in the gateway's bootstrap configuration. | String |
-| `certResource` | Specifies an SDS resource name. Consul requests the SDS resource name when fetching the certificate from the SDS service. When set, Consul serves the certificate to all listeners over TLS unless a listener-specific TLS configuration overrides the SDS configuration. | String |
-
-
-
-
-
-## Examples
-
-Refer to the following examples for common ingress gateway configuration patterns:
-- [Define a TCP listener](#define-a-tcp-listener)
-- [Use wildcards to define listeners](#use-wildcards-to-define-an-http-listener)
-- [HTTP listener with path-based routes](#http-listener-with-path-based-routes)
-
-### Define a TCP listener
-
-The following example sets up a TCP listener on an ingress gateway named `us-east-ingress` that proxies traffic to the `db` service. For Consul Enterprise, the `db` service can only listen for traffic in the `default` namespace inside the `team-frontend` admin partition:
-
-#### Consul CE
-
-
-
-```hcl
-Kind = "ingress-gateway"
-Name = "us-east-ingress"
-
-Listeners = [
- {
- Port = 3456
- Protocol = "tcp"
- Services = [
- {
- Name = "db"
- }
- ]
- }
-]
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: IngressGateway
-metadata:
- name: us-east-ingress
-spec:
- listeners:
- - port: 3456
- protocol: tcp
- services:
- - name: db
-```
-
-```json
-{
- "Kind": "ingress-gateway",
- "Name": "us-east-ingress",
- "Listeners": [
- {
- "Port": 3456,
- "Protocol": "tcp",
- "Services": [
- {
- "Name": "db"
- }
- ]
- }
- ]
-}
-```
-
-
-
-#### Consul Enterprise
-
-
-
-```hcl
-Kind = "ingress-gateway"
-Name = "us-east-ingress"
-Namespace = "default"
-Partition = "team-frontend"
-
-Listeners = [
- {
- Port = 3456
- Protocol = "tcp"
- Services = [
- {
- Namespace = "ops"
- Name = "db"
- }
- ]
- }
-]
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: IngressGateway
-metadata:
- name: us-east-ingress
- namespace: default
-spec:
- listeners:
- - port: 3456
- protocol: tcp
- services:
- - name: db
- namespace: ops
-```
-
-```json
-{
- "Kind": "ingress-gateway",
- "Name": "us-east-ingress",
- "Namespace": "default",
- "Partition": "team-frontend",
- "Listeners": [
- {
- "Port": 3456,
- "Protocol": "tcp",
- "Services": [
- {
- "Namespace": "ops",
- "Name": "db"
- }
- ]
- }
- ]
-}
-```
-
-
-
-### Use wildcards to define an HTTP listener
-
-The following example gateway is named `us-east-ingress` and defines two listeners. The first listener is configured to listen on port `8080` and uses a wildcard (`*`) to proxy traffic to all services in the datacenter. The second listener exposes the `api` and `web` services on port `4567` at user-provided hosts.
-
-TLS is enabled on every listener. The `max_connections` of the ingress gateway proxy to each upstream cluster is set to `4096`.
-
-The Consul Enterprise version implements the following additional configurations:
-
-- The ingress gateway is set up in the `default` [namespace](/consul/docs/enterprise/namespaces) and proxies traffic to all services in the `frontend` namespace.
-- The `api` and `web` services are proxied to team-specific [admin partitions](/consul/docs/enterprise/admin-partitions):
-
-#### Consul CE
-
-
-
-```hcl
-Kind = "ingress-gateway"
-Name = "us-east-ingress"
-
-TLS {
- Enabled = true
-}
-
-Defaults {
- MaxConnections = 4096
-}
-
-Listeners = [
- {
- Port = 8080
- Protocol = "http"
- Services = [
- {
- Name = "*"
- }
- ]
- },
- {
- Port = 4567
- Protocol = "http"
- Services = [
- {
- Name = "api"
- Hosts = ["foo.example.com"]
- },
- {
- Name = "web"
- Hosts = ["website.example.com"]
- }
- ]
- }
-]
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: IngressGateway
-metadata:
- name: us-east-ingress
-spec:
- tls:
- enabled: true
- listeners:
- - port: 8080
- protocol: http
- services:
- - name: '*'
- - port: 4567
- protocol: http
- services:
- - name: api
- hosts: ['foo.example.com']
- - name: web
- hosts: ['website.example.com']
-```
-
-```json
-{
- "Kind": "ingress-gateway",
- "Name": "us-east-ingress",
- "TLS": {
- "Enabled": true
- },
- "Listeners": [
- {
- "Port": 8080,
- "Protocol": "http",
- "Services": [
- {
- "Name": "*"
- }
- ]
- },
- {
- "Port": 4567,
- "Protocol": "http",
- "Services": [
- {
- "Name": "api",
- "Hosts": ["foo.example.com"]
- },
- {
- "Name": "web",
- "Hosts": ["website.example.com"]
- }
- ]
- }
- ]
-}
-```
-
-
-
-#### Consul Enterprise
-
-
-
-```hcl
-Kind = "ingress-gateway"
-Name = "us-east-ingress"
-Namespace = "default"
-
-TLS {
- Enabled = true
-}
-
-Listeners = [
- {
- Port = 8080
- Protocol = "http"
- Services = [
- {
- Namespace = "frontend"
- Name = "*"
- }
- ]
- },
- {
- Port = 4567
- Protocol = "http"
- Services = [
- {
- Namespace = "frontend"
- Name = "api"
- Hosts = ["foo.example.com"]
- Partition = "api-team"
- },
- {
- Namespace = "frontend"
- Name = "web"
- Hosts = ["website.example.com"]
- Partition = "web-team"
- }
- ]
- }
-]
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: IngressGateway
-metadata:
- name: us-east-ingress
- namespace: default
-spec:
- tls:
- enabled: true
- listeners:
- - port: 8080
- protocol: http
- services:
- - name: '*'
- namespace: frontend
- - port: 4567
- protocol: http
- services:
- - name: api
- namespace: frontend
- hosts: ['foo.example.com']
- partition: api-team
- - name: web
- namespace: frontend
- hosts: ['website.example.com']
- partition: web-team
-```
-
-```json
-{
- "Kind": "ingress-gateway",
- "Name": "us-east-ingress",
- "Namespace": "default",
- "TLS": {
- "Enabled": true
- },
- "Listeners": [
- {
- "Port": 8080,
- "Protocol": "http",
- "Services": [
- {
- "Namespace": "frontend",
- "Name": "*"
- }
- ]
- },
- {
- "Port": 4567,
- "Protocol": "http",
- "Services": [
- {
- "Namespace": "frontend",
- "Name": "api",
- "Hosts": ["foo.example.com"],
- "Partition": "api-team"
- },
- {
- "Namespace": "frontend",
- "Name": "web",
- "Hosts": ["website.example.com"],
- "Partition": "web-team"
- }
- ]
- }
- ]
-}
-```
-
-
diff --git a/website/content/docs/connect/config-entries/mesh.mdx b/website/content/docs/connect/config-entries/mesh.mdx
deleted file mode 100644
index cf75c4bfa4ab..000000000000
--- a/website/content/docs/connect/config-entries/mesh.mdx
+++ /dev/null
@@ -1,587 +0,0 @@
----
-layout: docs
-page_title: Mesh - Configuration Entry Reference
-description: >-
- The mesh configuration entry kind defines global default settings like TLS version requirements for proxies inside the service mesh. Use the reference guide to learn about `"mesh"` config entry parameters and how to control communication with services outside of the mesh.
----
-
-# Mesh Configuration Entry
-
-The `mesh` configuration entry allows you to define a global default configuration that applies to all service mesh proxies.
-Settings in this config entry apply across all namespaces and federated datacenters.
-
-## Sample Configuration Entries
-
-### Mesh-wide TLS Min Version
-
-Enforce that service mesh mTLS traffic uses TLS v1.2 or newer.
-
-
-
-
-
-
-```hcl
-Kind = "mesh"
-TLS {
- Incoming {
- TLSMinVersion = "TLSv1_2"
- }
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: Mesh
-metadata:
- name: mesh
-spec:
- tls:
- incoming:
- tlsMinVersion: TLSv1_2
-```
-
-```json
-{
- "Kind": "mesh",
- "TLS": {
- "Incoming": {
- "TLSMinVersion": "TLSv1_2"
- }
- }
-}
-```
-
-
-
-
-
-
-The `mesh` configuration entry can only be created in the `default` namespace and will apply to proxies across **all** namespaces.
-
-
-
-```hcl
-Kind = "mesh"
-
-TLS {
- Incoming {
- TLSMinVersion = "TLSv1_2"
- }
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: Mesh
-metadata:
- name: mesh
- namespace: default
-spec:
- tls:
- incoming:
- tlsMinVersion: TLSv1_2
-```
-
-```json
-{
- "Kind": "mesh",
- "Namespace": "default",
- "Partition": "default",
- "TLS": {
- "Incoming": {
- "TLSMinVersion": "TLSv1_2"
- }
- }
-}
-```
-
-
-
-
-
-
-Note that the Kubernetes example does not include a `partition` field. Configuration entries are applied on Kubernetes using [custom resource definitions (CRD)](/consul/docs/k8s/crds), which can only be scoped to their own partition.
-
-### Mesh Destinations Only
-
-Only allow transparent proxies to dial addresses in the mesh.
-
-
-
-
-
-
-```hcl
-Kind = "mesh"
-TransparentProxy {
- MeshDestinationsOnly = true
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: Mesh
-metadata:
- name: mesh
-spec:
- transparentProxy:
- meshDestinationsOnly: true
-```
-
-```json
-{
- "Kind": "mesh",
- "TransparentProxy": {
- "MeshDestinationsOnly": true
- }
-}
-```
-
-
-
-
-
-
-The `mesh` configuration entry can only be created in the `default` namespace and will apply to proxies across **all** namespaces.
-
-
-
-```hcl
-Kind = "mesh"
-
-TransparentProxy {
- MeshDestinationsOnly = true
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: Mesh
-metadata:
- name: mesh
- namespace: default
-spec:
- transparentProxy:
- meshDestinationsOnly: true
-```
-
-```json
-{
- "Kind": "mesh",
- "Namespace": "default",
- "Partition": "default",
- "TransparentProxy": {
- "MeshDestinationsOnly": true
- }
-}
-```
-
-
-
-
-
-
-Note that the Kubernetes example does not include a `partition` field. Configuration entries are applied on Kubernetes using [custom resource definitions (CRD)](/consul/docs/k8s/crds), which can only be scoped to their own partition.
-
-### Peer Through Mesh Gateways
-
-Set the `PeerThroughMeshGateways` parameter to `true` to route peering control plane traffic through mesh gateways.
-
-
-
-
-
-
-```hcl
-Kind = "mesh"
-Peering {
- PeerThroughMeshGateways = true
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: Mesh
-metadata:
- name: mesh
-spec:
- peering:
- peerThroughMeshGateways: true
-```
-
-```json
-{
- "Kind": "mesh",
- "Peering": {
- "PeerThroughMeshGateways": true
- }
-}
-```
-
-
-
-
-
-
-You can only set the `PeerThroughMeshGateways` attribute on `mesh` configuration entries in the `default` partition.
-The `default` partition owns the traffic routed through the mesh gateway control plane to Consul servers.
-
-
-
-```hcl
-Kind = "mesh"
-
-Peering {
- PeerThroughMeshGateways = true
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: Mesh
-metadata:
- name: mesh
- namespace: default
-spec:
- peering:
- peerThroughMeshGateways: true
-```
-
-```json
-{
- "Kind": "mesh",
- "Peering": {
- "PeerThroughMeshGateways": true
- }
-}
-```
-
-
-
-
-
-
-Note that the Kubernetes example does not include a `partition` field. Configuration entries are applied on Kubernetes using [custom resource definitions (CRD)](/consul/docs/k8s/crds), which can only be scoped to their own partition.
-
-### Request Normalization
-
-Enable options under `HTTP.Incoming.RequestNormalization` to apply normalization to all inbound traffic to mesh proxies.
-
-~> **Compatibility warning**: This feature is available as of Consul CE 1.20.1 and Consul Enterprise 1.20.1, 1.19.2, 1.18.3, and 1.15.15. We recommend upgrading to the latest version of Consul to take advantage of the latest features and improvements.
-
-
-
-```hcl
-Kind = "mesh"
-HTTP {
- Incoming {
- RequestNormalization {
- InsecureDisablePathNormalization = false // default false, shown for completeness
- MergeSlashes = true
- PathWithEscapedSlashesAction = "UNESCAPE_AND_FORWARD"
- HeadersWithUnderscoresAction = "REJECT_REQUEST"
- }
- }
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: Mesh
-metadata:
- name: mesh
-spec:
- http:
- incoming:
- requestNormalization:
- insecureDisablePathNormalization: false # default false, shown for completeness
- mergeSlashes: true
- pathWithEscapedSlashesAction: UNESCAPE_AND_FORWARD
- headersWithUnderscoresAction: REJECT_REQUEST
-```
-
-```json
-{
- "Kind": "mesh",
- "HTTP": {
- "Incoming": {
- "RequestNormalization": {
- "InsecureDisablePathNormalization": false,
- "MergeSlashes": true,
- "PathWithEscapedSlashesAction": "UNESCAPE_AND_FORWARD",
- "HeadersWithUnderscoresAction": "REJECT_REQUEST"
- }
- }
- }
-}
-```
-
-
-
-## Available Fields
-
-: nil',
- description:
- 'Specifies arbitrary KV metadata pairs. Added in Consul 1.8.4.',
- yaml: false,
- },
- {
- name: 'metadata',
- children: [
- {
- name: 'name',
- description: 'Must be set to `mesh`',
- },
- {
- name: 'namespace',
- enterprise: true,
- description:
- 'Must be set to `default`. If running Consul Community Edition, the namespace is ignored (see [Kubernetes Namespaces in Consul CE](/consul/docs/k8s/crds#consul-ce)). If running Consul Enterprise see [Kubernetes Namespaces in Consul Enterprise](/consul/docs/k8s/crds#consul-enterprise) for additional information.',
- },
- ],
- hcl: false,
- },
- {
- name: 'TransparentProxy',
- type: 'TransparentProxyConfig: ',
- description:
- 'Controls configuration specific to proxies in `transparent` [mode](/consul/docs/connect/config-entries/service-defaults#mode). Added in v1.10.0.',
- children: [
- {
- name: 'MeshDestinationsOnly',
- type: 'bool: false',
- description: `Determines whether sidecar proxies operating in transparent mode can
- proxy traffic to IP addresses not registered in Consul's mesh. If enabled, traffic will only be proxied
- to upstream proxies or mesh-native services. If disabled, requests will be proxied as-is to the
- original destination IP address. Consul will not encrypt the connection.`,
- },
- ],
- },
- {
- name: 'AllowEnablingPermissiveMutualTLS',
- type: 'bool: false',
- description:
- 'Controls whether `MutualTLSMode=permissive` can be set in the `proxy-defaults` and `service-defaults` configuration entries. '
- },
- {
- name: 'ValidateClusters',
- type: 'bool: false',
- description:
- `Controls whether the clusters the route table refers to are validated. The default value is false. When set to
- false and a route refers to a cluster that does not exist, the route table loads and routing to a non-existent
- cluster results in a 404. When set to true and the route is set to a cluster that do not exist, the route table
- will not load. For more information, refer to
- [HTTP route configuration in the Envoy docs](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route.proto#envoy-v3-api-field-config-route-v3-routeconfiguration-validate-clusters)
- for more details. `,
- },
- {
- name: 'TLS',
- type: 'TLSConfig: ',
- description: 'TLS configuration for the service mesh.',
- children: [
- {
- name: 'Incoming',
- type: 'TLSDirectionConfig: ',
- description: `TLS configuration for inbound mTLS connections targeting
- the public listener on \`connect-proxy\` and \`terminating-gateway\`
- proxy kinds.`,
- children: [
- {
- name: 'TLSMinVersion',
- type: 'string: ""',
- description:
- "Set the default minimum TLS version supported. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.",
- },
- {
- name: 'TLSMaxVersion',
- type: 'string: ""',
- description: {
- hcl:
- "Set the default maximum TLS version supported. Must be greater than or equal to `TLSMinVersion`. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy will default to TLS 1.3 as a max version for incoming connections.",
- yaml:
- "Set the default maximum TLS version supported. Must be greater than or equal to `tls_min_version`. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy will default to TLS 1.3 as a max version for incoming connections.",
- },
- },
- {
- name: 'CipherSuites',
- type: 'array: ',
- description: `Set the default list of TLS cipher suites
- to support when negotiating connections using
- TLS 1.2 or earlier. If unspecified, Envoy will use a
- [default server cipher list](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#envoy-v3-api-field-extensions-transport-sockets-tls-v3-tlsparameters-cipher-suites).
- The list of supported cipher suites can seen in
- [\`consul/types/tls.go\`](https://github.com/hashicorp/consul/blob/v1.11.2/types/tls.go#L154-L169)
- and is dependent on underlying support in Envoy. Future
- releases of Envoy may remove currently-supported but
- insecure cipher suites, and future releases of Consul
- may add new supported cipher suites if any are added to
- Envoy.`,
- },
- ],
- },
- {
- name: 'Outgoing',
- type: 'TLSDirectionConfig: ',
- description: `TLS configuration for outbound mTLS connections dialing upstreams
- from \`connect-proxy\` and \`ingress-gateway\`
- proxy kinds.`,
- children: [
- {
- name: 'TLSMinVersion',
- type: 'string: ""',
- description:
- "Set the default minimum TLS version supported. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.",
- },
- {
- name: 'TLSMaxVersion',
- type: 'string: ""',
- description: {
- hcl:
- "Set the default maximum TLS version supported. Must be greater than or equal to `TLSMinVersion`. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy will default to TLS 1.2 as a max version for outgoing connections, but future Envoy releases [may change this to TLS 1.3](https://github.com/envoyproxy/envoy/issues/9300).",
- yaml:
- "Set the default maximum TLS version supported. Must be greater than or equal to `tls_min_version`. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy will default to TLS 1.2 as a max version for outgoing connections, but future Envoy releases [may change this to TLS 1.3](https://github.com/envoyproxy/envoy/issues/9300).",
- },
- },
- {
- name: 'CipherSuites',
- type: 'array: ',
- description: `Set the default list of TLS cipher suites
- to support when negotiating connections using
- TLS 1.2 or earlier. If unspecified, Envoy will use a
- [default server cipher list](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#envoy-v3-api-field-extensions-transport-sockets-tls-v3-tlsparameters-cipher-suites).
- The list of supported cipher suites can seen in
- [\`consul/types/tls.go\`](https://github.com/hashicorp/consul/blob/v1.11.2/types/tls.go#L154-L169)
- and is dependent on underlying support in Envoy. Future
- releases of Envoy may remove currently-supported but
- insecure cipher suites, and future releases of Consul
- may add new supported cipher suites if any are added to
- Envoy.`,
- },
- ],
- },
- ],
- },
- {
- name: 'HTTP',
- type: 'HTTPConfig: ',
- description: 'HTTP configuration for the service mesh.',
- children: [
- {
- name: 'SanitizeXForwardedClientCert',
- type: 'bool: ',
- description: `If configured to \`true\`, the \`forward_client_cert_details\` option will be set to \`SANITIZE\`
- for all Envoy proxies. As a result, Consul will not include the \`x-forwarded-client-cert\` header in the next hop.
- If set to \`false\` (default), the XFCC header is propagated to upstream applications.`,
- },
- {
- name: 'Incoming',
- type: 'DirectionalHTTPConfig: ',
- description: `HTTP configuration for inbound traffic to mesh proxies.`,
- children: [
- {
- name: 'RequestNormalization',
- type: 'RequestNormalizationConfig: ',
- description: `Request normalization configuration for inbound traffic to mesh proxies.`,
- children: [
- {
- name: 'InsecureDisablePathNormalization',
- type: 'bool: false',
- description: `Sets the value of the \`normalize_path\` option in the Envoy listener's \`HttpConnectionManager\`. The default value is \`false\`.
- When set to \`true\` in Consul, \`normalize_path\` is set to \`false\` for the Envoy proxy.
- This parameter disables the normalization of request URL paths according to RFC 3986,
- conversion of \`\\\` to \`/\`, and decoding non-reserved %-encoded characters. When using L7
- intentions with path match rules, we recommend enabling path normalization in order
- to avoid match rule circumvention with non-normalized path values.`,
- },
- {
- name: 'MergeSlashes',
- type: 'bool: false',
- description: `Sets the value of the \`merge_slashes\` option in the Envoy listener's \`HttpConnectionManager\`. The default value is \`false\`.
- This option controls the normalization of request URL paths by merging consecutive \`/\` characters. This normalization is not part
- of RFC 3986. When using L7 intentions with path match rules, we recommend enabling this setting to avoid match rule circumvention through non-normalized path values, unless legitimate service
- traffic depends on allowing for repeat \`/\` characters, or upstream services are configured to
- differentiate between single and multiple slashes.`,
- },
- {
- name: 'PathWithEscapedSlashesAction',
- type: 'string: ""',
- description: `Sets the value of the \`path_with_escaped_slashes_action\` option in the Envoy listener's
- \`HttpConnectionManager\`. The default value of this option is empty, which is
- equivalent to \`IMPLEMENTATION_SPECIFIC_DEFAULT\`. This parameter controls the action taken in response to request URL paths with escaped
- slashes in the path. When using L7 intentions with path match rules, we recommend enabling this setting to avoid match rule circumvention through non-normalized path values, unless legitimate service
- traffic depends on allowing for escaped \`/\` or \`\\\` characters, or upstream services are configured to
- differentiate between escaped and unescaped slashes. Refer to the Envoy documentation for more information on available
- options.`,
- },
- {
- name: 'HeadersWithUnderscoresAction',
- type: 'string: ""',
- description: `Sets the value of the \`headers_with_underscores_action\` option in the Envoy listener's
- \`HttpConnectionManager\` under \`common_http_protocol_options\`. The default value of this option is
- empty, which is equivalent to \`ALLOW\`. Refer to the Envoy documentation for more information on available options.`,
- },
- ],
- },
- ],
- }
- ],
- },
- {
- name: 'Peering',
- type: 'PeeringMeshConfig: ',
- description:
- 'Controls configuration specific to [peering connections](/consul/docs/connect/cluster-peering).',
- children: [
- {
- name: 'PeerThroughMeshGateways',
- type: 'bool: ',
- description: `Determines if peering control-plane traffic should be routed through mesh gateways.
- When enabled, dialing clusters attempt to contact peers through their mesh gateways.
- Clusters that accept calls advertise the address of their mesh gateways, rather than the address of their Consul servers.`,
- },
- ],
- },
- ]}
-/>
-
-## ACLs
-
-Configuration entries may be protected by [ACLs](/consul/docs/security/acl).
-
-Reading a `mesh` config entry requires no specific privileges.
-
-Creating, updating, or deleting a `mesh` config entry requires
-`operator:write`.
diff --git a/website/content/docs/connect/config-entries/sameness-group.mdx b/website/content/docs/connect/config-entries/sameness-group.mdx
deleted file mode 100644
index f9fdffcd7a64..000000000000
--- a/website/content/docs/connect/config-entries/sameness-group.mdx
+++ /dev/null
@@ -1,397 +0,0 @@
----
-page_title: Sameness group configuration reference
-description: |-
- Sameness groups enable Consul to associate service instances with the same name deployed to the same namespace as identical services. Learn how to configure a `sameness-group` configuration entry to enable failover between partitions and cluster peers in non-federated networks.
----
-
-# Sameness groups configuration reference
-
-This page provides reference information for sameness group configuration entries. Sameness groups associate identical admin partitions to facilitate traffic between identical services. When partitions are part of the same Consul datacenter, you can create a sameness group by listing them in the `Members[].Partition` field. When partitions are located on remote clusters, you must establish cluster peering connections between remote partitions in order to add them to a sameness group in the `Members[].Peer` field.
-
-To learn more about creating a sameness group, refer to [Create sameness groups](/consul/docs/connect/cluster-peering/usage/create-sameness-groups) or [Create sameness groups on Kubernetes](/consul/docs/k8s/connect/cluster-peering/usage/create-sameness-groups).
-
-## Configuration model
-
-The following list outlines field hierarchy, language-specific data types, and requirements in the sameness group configuration entry. Click on a property name to view additional details, including default values.
-
-
-
-
-
-- [`Kind`](#kind): string | required | must be set to `sameness-group`
-- [`Name`](#name): string | required
-- [`Partition`](#partition): string | `default`
-- [`DefaultForFailover`](#defaultforfailover): boolean | `false`
-- [`IncludeLocal`](#includelocal): boolean | `false`
-- [`Members`](#members): list of maps | required
- - [`Partition`](#members-partition): string
- - [`Peer`](#members-peer): string
-
-
-
-
-
-- [`apiVersion`](#apiversion): string | required | must be set to `consul.hashicorp.com/v1alpha1`
-- [`kind`](#kind): string | required | must be set to `SamenessGroup`
-- [`metadata`](#metadata): map | required
- - [`name`](#metadata-name): string | required
-- [`spec`](#spec): map | required
- - [`defaultForFailover`](#spec-defaultforfailover): boolean | `false`
- - [`includeLocal`](#spec-includelocal): boolean | `false`
- - [`members`](#spec-members): list of maps | required
- - [`partition`](#spec-members-partition): string
- - [`peer`](#spec-members-peer): string
-
-
-
-
-## Complete configuration
-
-When every field is defined, a sameness group configuration entry has the following form:
-
-
-
-
-
-```hcl
-Kind = "sameness-group" # required
-Name = "" # required
-Partition = ""
-DefaultForFailover = false
-IncludeLocal = true
-Members = [ # required
- { Partition = "" },
- { Peer = "" }
-]
-```
-
-
-
-
-
-```json
-{
- "Kind": "sameness-group", // required
- "Name": "", // required
- "Partition": "",
- "DefaultForFailover": false,
- "IncludeLocal": true,
- "Members": [ // required
- {
- "Partition": ""
- },
- {
- "Peer": ""
- }
- ]
-}
-```
-
-
-
-
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1 # required
-kind: SamenessGroup # required
-metadata:
- name:
-spec:
- defaultForFailover: false
- includeLocal: true
- members: # required
- - partition:
- - peer:
-```
-
-
-
-
-## Specifications
-
-This section provides details about the fields you can configure in the sameness group configuration entry.
-
-
-
-
-
-### `Kind`
-
-Specifies the type of configuration entry to implement. Must be set to `sameness-group`.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: String value that must be set to `sameness-group`.
-
-### `Name`
-
-Specifies a name for the configuration entry that is used to identify the sameness group. To ensure consistency, use descriptive names and make sure that the same name is used when creating configuration entries to add each member to the sameness group.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: String
-
-### `Partition`
-
-Specifies the local admin partition that the sameness group applies to. Refer to [admin partitions](/consul/docs/enterprise/admin-partitions) for more information.
-
-#### Values
-
-- Default: `default`
-- Data type: String
-
-### `DefaultForFailover`
-
-Determines whether the sameness group should be used to establish connections to services with the same name during failover scenarios.
-
-When this field is set to `true`, upstream requests automatically fail over to services in the sameness group according to the order of the members in the `Members` list. It impacts all services on the partition.
-
-When this field is set to `false`, you can use a sameness group for failover by configuring the `Failover` block of a [service resolver configuration entry](/consul/docs/connect/config-entries/service-resolver).
-
-When you [query Consul DNS](/consul/docs/services/discovery/dns-static-lookups) using sameness groups, `DefaultForFailover` must be set to `true`. Otherwise, Consul DNS returns an error.
-
-#### Values
-
-- Default: `false`
-- Data type: Boolean
-
-### `IncludeLocal`
-
-Determines whether the local partition should be considered the first member of the sameness group. When this field is set to `true`, DNS queries, upstream requests, and failover traffic return a healthy instance from the local partition unless one does not exist.
-
-If you enable this parameter, you do not need to list the local partition as the first member in the group.
-
-#### Values
-
-- Default: `false`
-- Data type: Boolean
-
-### `Members`
-
-Specifies the partitions and cluster peers that are members of the sameness group from the perspective of the local partition.
-
-The local partition should be the first member listed unless `IncludeLocal=true`. The order of the members determines their precedence during failover scenarios. If a member is listed but Consul cannot connect to it, failover proceeds with the next healthy member in the list. For an example demonstrating how to configure this parameter, refer to [Failover between sameness groups](#failover-between-members-of-a-sameness-group).
-
-Each partition can belong to a single sameness group. You cannot associate a partition or cluster peer with multiple sameness groups.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: List that can contain maps of the following parameters:
- - [`Partition`](#members-partition)
- - [`Peer`](#members-peer)
-
-### `Members[].Partition`
-
-Specifies a partition in the local datacenter that is a member of the sameness group. Local partitions do not require cluster peering connections before they are added to a sameness group.
-
-#### Values
-
-- Default: None
-- Data type: String
-
-### `Members[].Peer`
-
-Specifies the name of a cluster peer that is a member of the sameness group.
-
-Cluster peering connections must be established before adding a remote partition to the list of members. Refer to [establish cluster peering connections](/consul/docs/connect/cluster-peering/usage/establish-cluster-peering) for more information.
-
-#### Values
-
-- Default: None
-- Data type: String
-
-
-
-
-
-### `apiVersion`
-
-Specifies the version of the Consul API for integrating with Kubernetes. The value must be `consul.hashicorp.com/v1alpha1`.
-
-#### Values
-
-- Default: None
-- This field is required.
-- String value that must be set to `consul.hashicorp.com/v1alpha1`.
-
-### `kind`
-
-Specifies the type of configuration entry to implement. Must be set to `SamenessGroup`.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: String value that must be set to `SamenessGroup`.
-
-### `metadata`
-
-Map that contains an arbitrary name for the configuration entry and the namespace it applies to.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-### `metadata.name`
-
-Specifies a name for the configuration entry that is used to identify the sameness group. To ensure consistency, use descriptive names and make sure that the same name is used when creating configuration entries to add each member to the sameness group.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: String
-
-### `spec`
-
-Map that contains the details about the `SamenessGroup` configuration entry. The `apiVersion`, `kind`, and `metadata` fields are siblings of the spec field. All other configurations are children.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: Map
-
-### `spec.defaultForFailover`
-
-Determines whether the sameness group should be used to establish connections to services with the same name during failover scenarios. When this field is set to `true`, upstream requests automatically failover to services in the sameness group according to the order of the members in the `spec.members` list. This setting affects all services on the partition.
-
-When this field is set to `false`, you can use a sameness group for failover by configuring the `spec.failover` block of a [service resolver CRD](/consul/docs/connect/config-entries/service-resolver).
-
-#### Values
-
-- Default: `false`
-- Data type: Boolean
-
-### `spec.includeLocal`
-
-Determines whether the local partition should be considered the first member of the sameness group. When this field is set to `true`, DNS queries, upstream requests, and failover traffic return a healthy instance from the local partition unless a healthy instance does not exist.
-
-If you enable this parameter, you do not need to list the local partition as the first member in the group.
-
-#### Values
-
-- Default: `false`
-- Data type: Boolean
-
-### `spec.members`
-
-Specifies the local partitions and cluster peers that are members of the sameness group from the perspective of the local partition.
-
-The local partition should be the first member listed unless `spec.includeLocal: true`. The order of the members determines their precedence during failover scenarios. If a member is listed but Consul cannot connect to it, failover proceeds with the next healthy member in the list. For an example demonstrating how to configure this parameter, refer to [Failover between sameness groups](#failover-between-sameness-groups).
-
-Each partition can belong to a single sameness group. You cannot associate a partition or cluster peer with multiple sameness groups.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: List that can contain maps of the following parameters:
-
- - [`partition`](#spec-members-partition)
- - [`peer`](#spec-members-peer)
-
-### `spec.members[].partition`
-
-Specifies a partition in the local datacenter that is a member of the sameness group. Local partitions do not require cluster peering connections before they are added to a sameness group.
-
-#### Values
-
-- Default: None
-- Data type: String
-
-### `spec.members[].peer`
-
-Specifies the name of a cluster peer that is a member of the sameness group.
-
-Cluster peering connections must be established before adding a peer to the list of members. Refer to [establish cluster peering connections](/consul/docs/connect/cluster-peering/usage/establish-cluster-peering) for more information.
-
-#### Values
-
-- Default: None
-- Data type: String
-
-
-
-
-## Examples
-
-The following examples demonstrate common sameness group configuration patterns for specific use cases.
-
-### Failover between members of a sameness group
-
-In the following example, the configuration entry defines a sameness group named `products-api` that applies to the `store-east` partition in the local datacenter. The sameness group is configured so that when a service instance in `store-east` fails, Consul attempts to establish a failover connection in the following order:
-
-- Services with the same name in the `store-east` partition
-- Services with the same name in the `inventory-east` partition in the same datacenter
-- Services with the same name in the `store-west` partition of datacenter `dc2`, which has an established cluster peering connection.
-- Services with the same name in the `inventory-west` partition of `dc2`, which has an established cluster peering connection.
-
-
-
-
-
-```hcl
-Kind = "sameness-group"
-Name = "products-api"
-Partition = "store-east"
-Members = [
- { Partition = "store-east" },
- { Partition = "inventory-east" },
- { Peer = "dc2-store-west" },
- { Peer = "dc2-inventory-west" }
-]
-```
-
-
-
-
-
-```json
-{
- "Kind": "sameness-group",
- "Name": "products-api",
- "Partition": "store-east",
- "Members": [
- {
- "Partition": "store-east"
- },
- {
- "Partition": "inventory-east"
- },
- {
- "Peer": "dc2-store-west"
- },
- {
- "Peer": "dc2-inventory-west"
- }
- ]
-}
-```
-
-
-
-
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: SamenessGroup
-metadata:
- name: products-api
-spec:
- members:
- - partition: store-east
- - partition: inventory-east
- - peer: dc2-store-west
- - peer: dc2-inventory-west
-```
-
-
-
diff --git a/website/content/docs/connect/config-entries/terminating-gateway.mdx b/website/content/docs/connect/config-entries/terminating-gateway.mdx
deleted file mode 100644
index 4512cf1a8148..000000000000
--- a/website/content/docs/connect/config-entries/terminating-gateway.mdx
+++ /dev/null
@@ -1,701 +0,0 @@
----
-layout: docs
-page_title: Terminating Gateway - Configuration Entry Reference
-description: >-
- The terminating gateway configuration entry kind defines behavior to secure outgoing communication between the service mesh and non-mesh services. Use the reference guide to learn about `terminating-gateway` config entry parameters and connecting from your service mesh to external or non-mesh services registered with Consul.
----
-
-# Terminating Gateway Configuration Entry
-
-The `terminating-gateway` config entry kind (`TerminatingGateway` on Kubernetes) allows you to configure terminating gateways
-to proxy traffic from services in the Consul service mesh to services registered with Consul that do not have a
-[service mesh sidecar proxy](/consul/docs/connect/proxies). The configuration is associated with the name of a gateway service
-and will apply to all instances of the gateway with that name.
-
-~> [Configuration entries](/consul/docs/agent/config-entries) are global in scope. A configuration entry for a gateway name applies
-across all federated Consul datacenters. If terminating gateways in different Consul datacenters need to route to different
-sets of services within their datacenter then the terminating gateways **must** be registered with different names.
-
-See [Terminating Gateway](/consul/docs/connect/gateways/terminating-gateway) for more information.
-
-## TLS Origination
-
-By specifying a path to a [CA file](/consul/docs/connect/config-entries/terminating-gateway#cafile) connections
-from the terminating gateway will be encrypted using one-way TLS authentication. If a path to a
-[client certificate](/consul/docs/connect/config-entries/terminating-gateway#certfile)
-and [private key](/consul/docs/connect/config-entries/terminating-gateway#keyfile) are also specified connections
-from the terminating gateway will be encrypted using mutual TLS authentication.
-
-~> Setting the `SNI` field is strongly recommended when enabling TLS to a service. If this field is not set,
-Consul will not attempt to verify the Subject Alternative Name fields in the service's certificate.
-
-If none of these are provided, Consul will **only** encrypt connections to the gateway and not
-from the gateway to the destination service.
-
-## Wildcard service specification
-
-Terminating gateways can optionally target all services within a Consul namespace by specifying a wildcard "\*"
-as the service name. Configuration options set on the wildcard act as defaults that can be overridden
-by options set on a specific service name.
-
-Note that if the wildcard specifier is used, and some services in that namespace have a service mesh sidecar proxy,
-traffic from the mesh to those services will be evenly load-balanced between the gateway and their sidecars.
-
-## Sample Config Entries
-
-### Access an external service
-
-
-
-
-Link gateway named "us-west-gateway" with the billing service.
-
-Connections to the external service will be unencrypted.
-
-
-
-```hcl
-Kind = "terminating-gateway"
-Name = "us-west-gateway"
-
-Services = [
- {
- Name = "billing"
- }
-]
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: TerminatingGateway
-metadata:
- name: us-west-gateway
-spec:
- services:
- - name: billing
-```
-
-```json
-{
- "Kind": "terminating-gateway",
- "Name": "us-west-gateway",
- "Services": [
- {
- "Name": "billing"
- }
- ]
-}
-```
-
-
-
-
-
-
-Link gateway named "us-west-gateway" in the default namespace with the billing service in the finance namespace.
-
-Connections to the external service will be unencrypted.
-
-
-
-```hcl
-Kind = "terminating-gateway"
-Name = "us-west-gateway"
-Namespace = "default"
-
-Services = [
- {
- Namespace = "finance"
- Name = "billing"
- }
-]
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: TerminatingGateway
-metadata:
- name: us-west-gateway
-spec:
- services:
- - name: billing
- namespace: finance
-```
-
-```json
-{
- "Kind": "terminating-gateway",
- "Name": "us-west-gateway",
- "Namespace": "default",
- "Services": [
- {
- "Namespace": "finance",
- "Name": "billing"
- }
- ]
-}
-```
-
-
-
-
-
-
-### Access an external service over TLS
-
-
-
-
-Link gateway named "us-west-gateway" with the billing service, and specify a CA
-file to be used for one-way TLS authentication.
-
--> **Note**: When not using destinations in transparent proxy mode, you must specify the `CAFile` parameter
-and point to a valid CA bundle in order to properly initiate a TLS
-connection to the destination service. For more information about configuring a gateway for destinations, refer to [Register an External Service as a Destination](/consul/docs/k8s/connect/terminating-gateways#register-an-external-service-as-a-destination).
-
-
-
-```hcl
-Kind = "terminating-gateway"
-Name = "us-west-gateway"
-
-Services = [
- {
- Name = "billing"
- CAFile = "/etc/certs/ca-chain.cert.pem"
- SNI = "billing.service.com"
- }
-]
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: TerminatingGateway
-metadata:
- name: us-west-gateway
-spec:
- services:
- - name: billing
- caFile: /etc/certs/ca-chain.cert.pem
- sni: billing.service.com
-```
-
-```json
-{
- "Kind": "terminating-gateway",
- "Name": "us-west-gateway",
- "Services": [
- {
- "Name": "billing",
- "CAFile": "/etc/certs/ca-chain.cert.pem",
- "SNI": "billing.service.com"
- }
- ]
-}
-```
-
-
-
-
-
-
-Link gateway named "us-west-gateway" in the default namespace with the billing service in the finance namespace,
-and specify a CA file to be used for one-way TLS authentication.
-
--> **Note**: The `CAFile` parameter must be specified _and_ point to a valid CA
-bundle in order to properly initiate a TLS connection to the destination service.
-
-
-
-```hcl
-Kind = "terminating-gateway"
-Name = "us-west-gateway"
-Namespace = "default"
-
-Services = [
- {
- Namespace = "finance"
- Name = "billing"
- CAFile = "/etc/certs/ca-chain.cert.pem"
- SNI = "billing.service.com"
- }
-]
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: TerminatingGateway
-metadata:
- name: us-west-gateway
-spec:
- services:
- - name: billing
- namespace: finance
- caFile: /etc/certs/ca-chain.cert.pem
- sni: billing.service.com
-```
-
-```json
-{
- "Kind": "terminating-gateway",
- "Name": "us-west-gateway",
- "Namespace": "default",
- "Services": [
- {
- "Namespace": "finance",
- "Name": "billing",
- "CAFile": "/etc/certs/ca-chain.cert.pem",
- "SNI": "billing.service.com"
- }
- ]
-}
-```
-
-
-
-
-
-
-### Access an external service over mutual TLS
-
-
-
-
-Link gateway named "us-west-gateway" with the billing service, and specify a CA
-file, key file, and cert file to be used for mutual TLS authentication.
-
--> **Note**: The `CAFile` parameter must be specified _and_ point to a valid CA
-bundle in order to properly initiate a TLS connection to the destination service.
-
-
-
-```hcl
-Kind = "terminating-gateway"
-Name = "us-west-gateway"
-
-Services = [
- {
- Name = "billing"
- CAFile = "/etc/certs/ca-chain.cert.pem"
- KeyFile = "/etc/certs/gateway.key.pem"
- CertFile = "/etc/certs/gateway.cert.pem"
- SNI = "billing.service.com"
- }
-]
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: TerminatingGateway
-metadata:
- name: us-west-gateway
-spec:
- services:
- - name: billing
- caFile: /etc/certs/ca-chain.cert.pem
- keyFile: /etc/certs/gateway.key.pem
- certFile: /etc/certs/gateway.cert.pem
- sni: billing.service.com
-```
-
-```json
-{
- "Kind": "terminating-gateway",
- "Name": "us-west-gateway",
- "Services": [
- {
- "Name": "billing",
- "CAFile": "/etc/certs/ca-chain.cert.pem",
- "KeyFile": "/etc/certs/gateway.key.pem",
- "CertFile": "/etc/certs/gateway.cert.pem",
- "SNI": "billing.service.com"
- }
- ]
-}
-```
-
-
-
-
-
-
-Link gateway named "us-west-gateway" in the default namespace with the billing service in the finance namespace.
-Also specify a CA file, key file, and cert file to be used for mutual TLS authentication.
-
--> **Note**: The `CAFile` parameter must be specified _and_ point to a valid CA
-bundle in order to properly initiate a TLS connection to the destination service.
-
-
-
-```hcl
-Kind = "terminating-gateway"
-Name = "us-west-gateway"
-Namespace = "default"
-
-Services = [
- {
- Namespace = "finance"
- Name = "billing"
- CAFile = "/etc/certs/ca-chain.cert.pem"
- KeyFile = "/etc/certs/gateway.key.pem"
- CertFile = "/etc/certs/gateway.cert.pem"
- SNI = "billing.service.com"
- }
-]
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: TerminatingGateway
-metadata:
- name: us-west-gateway
-spec:
- services:
- - name: billing
- namespace: finance
- caFile: /etc/certs/ca-chain.cert.pem
- keyFile: /etc/certs/gateway.key.pem
- certFile: /etc/certs/gateway.cert.pem
- sni: billing.service.com
-```
-
-```json
-{
- "Kind": "terminating-gateway",
- "Name": "us-west-gateway",
- "Namespace": "default",
- "Services": [
- {
- "Namespace": "finance",
- "Name": "billing",
- "CAFile": "/etc/certs/ca-chain.cert.pem",
- "KeyFile": "/etc/certs/gateway.key.pem",
- "CertFile": "/etc/certs/gateway.cert.pem",
- "SNI": "billing.service.com"
- }
- ]
-}
-```
-
-
-
-
-
-
-### Override connection parameters for a specific service
-
-
-
-
-Link gateway named "us-west-gateway" with all services in the datacenter, and configure default certificates for mutual TLS.
-
-Override the SNI and CA file used for connections to the billing service.
-
-
-
-
-
-```hcl
-Kind = "terminating-gateway"
-Name = "us-west-gateway"
-
-Services = [
- {
- Name = "*"
- CAFile = "/etc/common-certs/ca-chain.cert.pem"
- KeyFile = "/etc/common-certs/gateway.key.pem"
- CertFile = "/etc/common-certs/gateway.cert.pem"
- },
- {
- Name = "billing"
- CAFile = "/etc/billing-ca/ca-chain.cert.pem"
- SNI = "billing.service.com"
- }
-]
-```
-
-
-
-
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: TerminatingGateway
-metadata:
- name: us-west-gateway
-spec:
- services:
- - name: '*'
- caFile: /etc/common-certs/ca-chain.cert.pem
- keyFile: /etc/common-certs/gateway.key.pem
- certFile: /etc/common-certs/gateway.cert.pem
- - name: billing
- caFile: /etc/billing-ca/ca-chain.cert.pem
- sni: billing.service.com
-```
-
-
-
-
-
-```json
-{
- "Kind": "terminating-gateway",
- "Name": "us-west-gateway",
- "Services": [
- {
- "Name": "*",
- "CAFile": "/etc/common-certs/ca-chain.cert.pem",
- "KeyFile": "/etc/common-certs/gateway.key.pem",
- "CertFile": "/etc/common-certs/gateway.cert.pem"
- },
- {
- "Name": "billing",
- "CAFile": "/etc/billing-ca/ca-chain.cert.pem",
- "SNI": "billing.service.com"
- }
- ]
-}
-```
-
-
-
-
-
-
-
-
-Link gateway named "us-west-gateway" in the default namespace with all services in the finance namespace,
-and configure default certificates for mutual TLS.
-
-Override the SNI and CA file used for connections to the billing service:
-
-
-
-
-
-```hcl
-Kind = "terminating-gateway"
-Name = "us-west-gateway"
-Namespace = "default"
-
-Services = [
- {
- Namespace = "finance"
- Name = "*"
- CAFile = "/etc/common-certs/ca-chain.cert.pem"
- KeyFile = "/etc/common-certs/gateway.key.pem"
- CertFile = "/etc/common-certs/gateway.cert.pem"
- },
- {
- Namespace = "finance"
- Name = "billing"
- CAFile = "/etc/billing-ca/ca-chain.cert.pem"
- SNI = "billing.service.com"
- }
-]
-```
-
-
-
-
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: TerminatingGateway
-metadata:
- name: us-west-gateway
-spec:
- services:
- - name: '*'
- namespace: finance
- caFile: /etc/common-certs/ca-chain.cert.pem
- keyFile: /etc/common-certs/gateway.key.pem
- certFile: /etc/common-certs/gateway.cert.pem
- - name: billing
- namespace: finance
- caFile: /etc/billing-ca/ca-chain.cert.pem
- sni: billing.service.com
-```
-
-
-
-
-
-```json
-{
- "Kind": "terminating-gateway",
- "Name": "us-west-gateway",
- "Namespace": "default",
- "Services": [
- {
- "Namespace": "finance",
- "Name": "*",
- "CAFile": "/etc/common-certs/ca-chain.cert.pem",
- "KeyFile": "/etc/common-certs/gateway.key.pem",
- "CertFile": "/etc/common-certs/gateway.cert.pem"
- },
- {
- "Namespace": "finance",
- "Name": "billing",
- "CAFile": "/etc/billing-ca/ca-chain.cert.pem",
- "SNI": "billing.service.com"
- }
- ]
-}
-```
-
-
-
-
-
-
-
-
-## Available Fields
-
-',
- yaml: false,
- },
- {
- name: 'Namespace',
- type: `string: "default"`,
- enterprise: true,
- description:
- 'Specifies the namespace to which the configuration entry will apply. This must match the namespace in which the gateway is registered.' +
- ' If omitted, the namespace will be inherited from [the request](/consul/api-docs/config#ns)' +
- ' or will default to the `default` namespace.',
- yaml: false,
- },
- {
- name: 'Partition',
- type: `string: "default"`,
- enterprise: true,
- description:
- 'Specifies the admin partition to which the configuration entry will apply. This must match the partition in which the gateway is registered.' +
- ' If omitted, the partition will be inherited from [the request](/consul/api-docs/config)' +
- ' or will default to the `default` partition.',
- yaml: false,
- },
- {
- name: 'Meta',
- type: 'map: nil',
- description:
- 'Specifies arbitrary KV metadata pairs. Added in Consul 1.8.4.',
- yaml: false,
- },
- {
- name: 'metadata',
- children: [
- {
- name: 'name',
- description: 'Set to the name of the gateway being configured.',
- },
- {
- name: 'namespace',
- description:
- 'If running Consul Community Edition, the namespace is ignored (see [Kubernetes Namespaces in Consul CE](/consul/docs/k8s/crds#consul-ce)). If running Consul Enterprise see [Kubernetes Namespaces in Consul Enterprise](/consul/docs/k8s/crds#consul-enterprise) for more details.',
- },
- ],
- hcl: false,
- },
- {
- name: 'Services',
- type: 'array: ',
- description: `A list of services or destinations to link
- with the gateway. The gateway will proxy traffic to these services. These linked services
- must be registered with Consul for the gateway to discover their addresses. They must also
- be registered in the same Consul datacenter as the terminating gateway.
- Destinations are an exception to this requirement, and only need to be defined as a service-defaults configuration entry in the same datacenter.
- If Consul ACLs are enabled, the Terminating Gateway's ACL token must grant service:write for all linked services.`,
- children: [
- {
- name: 'Name',
- type: 'string: ""',
- description:
- 'The name of the service to link with the gateway. If the wildcard specifier, `*`, is provided, then ALL services within the namespace will be linked with the gateway.',
- },
- {
- name: 'Namespace',
- enterprise: true,
- type: 'string: ""',
- description:
- 'The namespace of the service. If omitted, the namespace will be inherited from the config entry.',
- },
- {
- name: 'CAFile',
- type: 'string: ""',
- description: `A file path to a PEM-encoded certificate authority.
- The file must be present on the proxy's filesystem.
- The certificate authority is used to verify the authenticity of the service linked with the gateway.
- It can be provided along with a CertFile and KeyFile for mutual TLS authentication, or on its own
- for one-way TLS authentication. If none is provided the gateway will not encrypt the traffic to the destination.`,
- },
- {
- name: 'CertFile',
- type: 'string: ""',
- description: {
- hcl: `A file path to a PEM-encoded certificate.
- The file must be present on the proxy's filesystem.
- The certificate is provided to servers to verify the gateway's authenticity. It must be provided if a \`KeyFile\` was specified.`,
- yaml: `A file path to a PEM-encoded certificate.
- The file must be present on the proxy's filesystem.
- The certificate is provided to servers to verify the gateway's authenticity. It must be provided if a \`keyFile\` was specified.`,
- },
- },
- {
- name: 'KeyFile',
- type: 'string: ""',
- description: {
- hcl: `A file path to a PEM-encoded private key.
- The file must be present on the proxy's filesystem.
- The key is used with the certificate to verify the gateway's authenticity. It must be provided if a \`CertFile\` was specified.`,
- yaml: `A file path to a PEM-encoded private key.
- The file must be present on the proxy's filesystem.
- The key is used with the certificate to verify the gateway's authenticity. It must be provided if a \`certFile\` was specified.`,
- },
- },
- {
- name: 'SNI',
- type: 'string: ""',
- description:
- `An optional hostname or domain name to specify during the TLS handshake. This option will also configure [strict SAN matching](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#envoy-v3-api-field-extensions-transport-sockets-tls-v3-certificatevalidationcontext-match-typed-subject-alt-names), which requires
- the external services to have certificates with SANs, not having which will result in \`CERTIFICATE_VERIFY_FAILED\` error.`,
- },
- {
- name: 'DisableAutoHostRewrite',
- type: 'bool: ""',
- description:
- 'When set to true, Terminating Gateway will not modify the incoming requests host header for this service.',
- },
- ],
- },
- ]}
-/>
-
-## ACLs
-
-Configuration entries may be protected by [ACLs](/consul/docs/security/acl).
-
-Reading a `terminating-gateway` config entry requires `service:read` on the `Name`
-field of the config entry.
-
-Creating, updating, or deleting a `terminating-gateway` config entry requires
-`operator:write`.
diff --git a/website/content/docs/connect/configuration.mdx b/website/content/docs/connect/configuration.mdx
deleted file mode 100644
index dd1e8e156d8c..000000000000
--- a/website/content/docs/connect/configuration.mdx
+++ /dev/null
@@ -1,109 +0,0 @@
----
-layout: docs
-page_title: Service Mesh Configuration - Overview
-description: >-
- Learn how to enable and configure Consul's service mesh capabilities in agent configurations, and how to integrate with schedulers like Kubernetes and Nomad. Consul's service mesh capabilities are provided by the "connect" subsystem.
----
-
-# Service Mesh Configuration Overview
-
-There are many configuration options exposed for Consul service mesh. The only option
-that must be set is the `connect.enabled` option on Consul servers to enable Consul service mesh.
-All other configurations are optional and have defaults suitable for many environments.
-
-The noun _connect_ is used throughout this documentation to refer to the connect
-subsystem that provides Consul's service mesh capabilities.
-Where you encounter the _noun_ connect, it is usually functionality specific to
-service mesh.
-
-## Agent configuration
-
-Begin by enabling service mesh for your Consul
-cluster. By default, service mesh is disabled. Enabling service mesh requires changing
-the configuration of only your Consul _servers_ (not client agents). To enable
-service mesh, add the following to a new or existing
-[server configuration file](/consul/docs/agent/config/config-files). In an existing cluster, this configuration change requires a Consul server restart, which you can perform one server at a time to maintain availability. In HCL:
-
-
-
-
-```hcl
-connect {
- enabled = true
-}
-```
-
-```json
-{
- "connect": {
- "enabled": true
- }
-}
-```
-
-
-This will enable service mesh and configure your Consul cluster to use the
-built-in certificate authority for creating and managing certificates.
-You may also configure Consul to use an external
-[certificate management system](/consul/docs/connect/ca), such as
-[Vault](https://www.vaultproject.io/).
-
-Services and proxies may always register with service mesh settings, but unless
-service mesh is enabled on the server agents, their attempts to communicate will fail
-because they have no means to obtain or verify service mesh TLS certificates.
-
-Other optional service mesh configurations that you can set in the server
-configuration file include:
-
-- [certificate authority settings](/consul/docs/agent/config/config-files#connect)
-- [token replication](/consul/docs/agent/config/config-files#acl_tokens_replication)
-- [dev mode](/consul/docs/agent/config/cli-flags#_dev)
-- [server host name verification](/consul/docs/agent/config/config-files#tls_internal_rpc_verify_server_hostname)
-
-If you would like to use Envoy as your service mesh proxy you will need to [enable
-gRPC](/consul/docs/agent/config/config-files#grpc_port).
-
-Additionally if you plan on using the observability features of Consul service mesh, it can
-be convenient to configure your proxies and services using [configuration
-entries](/consul/docs/agent/config-entries) which you can interact with using the
-CLI or API, or by creating configuration entry files. You will want to enable
-[centralized service
-configuration](/consul/docs/agent/config/config-files#enable_central_service_config) on
-clients, which allows each service's proxy configuration to be managed centrally
-via API.
-
-!> **Security note:** Enabling service mesh is enough to try the feature but doesn't
-automatically ensure complete security. Please read the [service mesh production
-tutorial](/consul/tutorials/developer-mesh/service-mesh-production-checklist) to understand the additional steps
-needed for a secure deployment.
-
-## Centralized proxy and service configuration
-
-If your network contains many instances of the same service and many colocated sidecar proxies, you can specify global settings for proxies or services in [Configuration Entries](/consul/docs/agent/config-entries). You can override the centralized configurations for individual proxy instances in their
-[sidecar service definitions](/consul/docs/connect/proxies/deploy-sidecar-services),
-and the default protocols for service instances in their [service
-definitions](/consul/docs/services/usage/define-services).
-
-## Schedulers
-
-Consul service mesh is especially useful if you are using an orchestrator like Nomad
-or Kubernetes, because these orchestrators can deploy thousands of service instances
-which frequently move hosts. Sidecars for each service can be configured through
-these schedulers, and in some cases they can automate Consul configuration,
-sidecar deployment, and service registration.
-
-### Nomad
-
-Consul service mesh can be used with Nomad to provide secure service-to-service
-communication between Nomad jobs and task groups. The ability to use the dynamic
-port feature of Nomad makes Consul service mesh particularly easy to use. Learn about how to
-configure Consul service mesh on Nomad by reading the
-[integration documentation](/consul/docs/connect/nomad).
-
-### Kubernetes
-
-The Consul Helm chart can automate much of Consul's service mesh configuration, and
-makes it easy to automatically inject Envoy sidecars into new pods when they are
-deployed. Learn about the [Helm chart](/consul/docs/k8s/helm) in general,
-or if you are already familiar with it, check out its
-[service mesh specific configurations](/consul/docs/k8s/connect).
diff --git a/website/content/docs/connect/connect-internals.mdx b/website/content/docs/connect/connect-internals.mdx
deleted file mode 100644
index 99541f012c3b..000000000000
--- a/website/content/docs/connect/connect-internals.mdx
+++ /dev/null
@@ -1,138 +0,0 @@
----
-layout: docs
-page_title: Service Mesh - How it Works
-description: >-
- Consul's service mesh enforces secure service communication using mutual TLS (mTLS) encryption and explicit authorization. Learn how the service mesh certificate authorities, intentions, and agents work together to provide Consul’s service mesh capabilities.
----
-
-# How Service Mesh Works
-
-This topic describes how many of the core features of Consul's service mesh functionality work.
-It is not a prerequisite,
-but this information will help you understand how Consul service mesh behaves in more complex scenarios.
-
-The noun _connect_ is used throughout this documentation to refer to the connect
-subsystem that provides Consul's service mesh capabilities.
-Where you encounter the _noun_ connect, it is usually functionality specific to
-service mesh.
-
-To try service mesh locally, complete the [Getting Started with Consul service
-mesh](/consul/tutorials/kubernetes-deploy/service-mesh?utm_source=docs)
-tutorial.
-
-## Mutual Transport Layer Security (mTLS)
-
-The core of Consul service mesh is based on [mutual TLS](https://en.wikipedia.org/wiki/Mutual_authentication).
-
-Consul service mesh provides each service with an identity encoded as a TLS certificate.
-This certificate is used to establish and accept connections to and from other
-services. The identity is encoded in the TLS certificate in compliance with
-the [SPIFFE X.509 Identity Document](https://github.com/spiffe/spiffe/blob/master/standards/X509-SVID.md).
-This enables Consul service mesh services to establish and accept connections with
-other SPIFFE-compliant systems.
-
-The client service verifies the destination service certificate
-against the [public CA bundle](/consul/api-docs/connect/ca#list-ca-root-certificates).
-This is very similar to a typical HTTPS web browser connection. In addition
-to this, the client provides its own client certificate to show its
-identity to the destination service. If the connection handshake succeeds,
-the connection is encrypted and authorized.
-
-The destination service verifies the client certificate against the [public CA
-bundle](/consul/api-docs/connect/ca#list-ca-root-certificates). After verifying the
-certificate, the next step depends upon the configured application protocol of
-the destination service. TCP (L4) services must authorize incoming _connections_
-against the configured set of Consul [intentions](/consul/docs/connect/intentions),
-whereas HTTP (L7) services must authorize incoming _requests_ against those same
-intentions. If the intention check responds successfully, the
-connection/request is established. Otherwise the connection/request is
-rejected.
-
-To generate and distribute certificates, Consul has a built-in CA that
-requires no other dependencies, and
-also ships with built-in support for [Vault](/consul/docs/connect/ca/vault). The PKI system is designed to be pluggable
-and can be extended to support any system by adding additional CA providers.
-
-All APIs required for Consul service mesh typically respond in microseconds and impose
-minimal overhead to existing services. To ensure this, Consul service mesh-related API calls
-are all made to the local Consul agent over a loopback interface, and all [agent
-Connect endpoints](/consul/api-docs/agent/connect) implement local caching, background
-updating, and support blocking queries. Most API calls operate on purely local
-in-memory data.
-
-## Agent Caching and Performance
-
-To enable fast responses on endpoints such as the [agent connect
-API](/consul/api-docs/agent/connect), the Consul agent locally caches most Consul service mesh-related
-data and sets up background [blocking queries](/consul/api-docs/features/blocking) against
-the server to update the cache in the background. This allows most API calls
-such as retrieving certificates or authorizing connections to use in-memory
-data and respond very quickly.
-
-All data cached locally by the agent is populated on demand. Therefore, if
-Consul service mesh is not used at all, the cache does not store any data. On first request,
-the data is loaded from the server and cached. The set of data cached is: public
-CA root certificates, leaf certificates, intentions, and service discovery
-results for upstreams. For leaf certificates and intentions, only data related
-to the service requested is cached, not the full set of data.
-
-Further, the cache is partitioned by ACL token and datacenters. This is done
-to minimize the complexity of the cache and prevent bugs where an ACL token
-may see data it shouldn't from the cache. This results in higher memory usage
-for cached data since it is duplicated per ACL token, but with the benefit
-of simplicity and security.
-
-With Consul service mesh enabled, you'll likely see increased memory usage by the
-local Consul agent. The total memory is dependent on the number of intentions
-related to the services registered with the agent accepting Consul service mesh-based
-connections. The other data (leaf certificates and public CA certificates)
-is a relatively fixed size per service. In most cases, the overhead per
-service should be relatively small: single digit kilobytes at most.
-
-The cache does not evict entries due to memory pressure. If memory capacity
-is reached, the process will attempt to swap. If swap is disabled, the Consul
-agent may begin failing and eventually crash. Cache entries do have TTLs
-associated with them and will evict their entries if they're not used. Given
-a long period of inactivity (3 days by default), the cache will empty itself.
-
-## Connections Across Datacenters
-
-A sidecar proxy's [upstream configuration](/consul/docs/connect/proxies/proxy-config-reference#upstream-configuration-reference)
-may specify an alternative datacenter or a prepared query that can address services
-in multiple datacenters (such as the [geo failover](/consul/tutorials/developer-discovery/automate-geo-failover) pattern).
-
-[Intentions](/consul/docs/connect/intentions) verify connections between services by
-source and destination name seamlessly across datacenters.
-
-Connections can be made via gateways to enable communicating across network
-topologies, allowing connections between services in each datacenter without
-externally routable IPs at the service level.
-
-## Intention Replication
-
-Intention replication happens automatically but requires the
-[`primary_datacenter`](/consul/docs/agent/config/config-files#primary_datacenter)
-configuration to be set to specify a datacenter that is authoritative
-for intentions. In production setups with ACLs enabled, the
-[replication token](/consul/docs/agent/config/config-files#acl_tokens_replication) must also
-be set in the secondary datacenter server's configuration.
-
-## Certificate Authority Federation
-
-The primary datacenter also acts as the root Certificate Authority (CA) for Consul service mesh.
-The primary datacenter generates a trust-domain UUID and obtains a root certificate
-from the configured CA provider which defaults to the built-in one.
-
-Secondary datacenters fetch the root CA public key and trust-domain ID from the
-primary and generate their own key and Certificate Signing Request (CSR) for an
-intermediate CA certificate. This CSR is signed by the root in the primary
-datacenter and the certificate is returned. The secondary datacenter can now use
-this intermediate to sign new Consul service mesh certificates in the secondary datacenter
-without WAN communication. CA keys are never replicated between datacenters.
-
-The secondary maintains watches on the root CA certificate in the primary. If the
-CA root changes for any reason such as rotation or migration to a new CA, the
-secondary automatically generates new keys and has them signed by the primary
-datacenter's new root before initiating an automatic rotation of all issued
-certificates in use throughout the secondary datacenter. This makes CA root key
-rotation fully automatic and with zero downtime across multiple datacenters.
diff --git a/website/content/docs/connect/connectivity-tasks.mdx b/website/content/docs/connect/connectivity-tasks.mdx
deleted file mode 100644
index bd5a4bc66f66..000000000000
--- a/website/content/docs/connect/connectivity-tasks.mdx
+++ /dev/null
@@ -1,72 +0,0 @@
----
-layout: docs
-page_title: Gateway Types
-description: >-
- Ingress, terminating, and mesh gateways are proxies that direct traffic into, out of, and inside of Consul's service mesh. Learn how these gateways enable different kinds of service-to-service communication.
----
-
-# Types of Gateway Connections in a Service Mesh
-
-~> **Note**: The features shown below are extensions of Consul's service mesh capabilities. If you are not utilizing
-Consul service mesh then these features will not be relevant to your task.
-
-## Service-to-service traffic between Consul datacenters
-
--> **1.6.0+:** This feature is available in Consul versions 1.6.0 and newer.
-
-Mesh gateways enable routing of service mesh traffic between different Consul datacenters. Those datacenters can reside
-in different clouds or runtime environments where general interconnectivity between all services in all datacenters
-isn't feasible. One scenario where this is useful is when connecting networks with overlapping IP address space.
-
-These gateways operate by sniffing the SNI header out of the mTLS connection and then routing the connection to the
-appropriate destination based on the server name requested.
-
-As of Consul 1.8.0, mesh gateways can also forward gossip and RPC traffic between Consul servers.
-This is enabled by [WAN federation via mesh gateways](/consul/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways).
-
-As of Consul 1.14.0, mesh gateways can route both data-plane (service-to-service) and control-plane (consul-to-consul) traffic for peered clusters.
-See [Mesh Gateways for Peering Control Plane Traffic](/consul/docs/connect/gateways/mesh-gateway/peering-via-mesh-gateways)
-
-For more information about mesh gateways, review the [complete documentation](/consul/docs/connect/gateways/mesh-gateway)
-and the [mesh gateway tutorial](/consul/tutorials/developer-mesh/service-mesh-gateways).
-
-
-
-## Traffic from outside the Consul service mesh to services in the mesh
-
--> **1.8.0+:** This feature is available in Consul versions 1.8.0 and newer.
-
-Ingress gateways are an entrypoint for outside traffic. They enable potentially unauthenticated ingress traffic from
-services outside the Consul service mesh to services inside the service mesh.
-
-These gateways allow you to define what services should be exposed, on what port, and by what hostname. You configure
-an ingress gateway by defining a set of listeners that can map to different sets of backing services.
-
-Ingress gateways are tightly integrated with Consul's L7 configuration and enable dynamic routing of HTTP requests by
-attributes like the request path.
-
-For more information about ingress gateways, review the [complete documentation](/consul/docs/connect/gateways/ingress-gateway)
-and the [ingress gateway tutorial](/consul/tutorials/developer-mesh/service-mesh-gateways).
-
-
-
-## Traffic from services in the Consul service mesh to external services
-
--> **1.8.0+:** This feature is available in Consul versions 1.8.0 and newer.
-
-Terminating gateways enable connectivity from services in the Consul service mesh to services outside the mesh.
-Services outside the mesh do not have sidecar proxies or are not [integrated natively](/consul/docs/connect/native).
-These may be services running on legacy infrastructure or managed cloud services running on
-infrastructure you do not control.
-
-Terminating gateways effectively act as egress proxies that can represent one or more services. They terminate service mesh
-mTLS connections, enforce Consul intentions, and forward requests to the appropriate destination.
-
-These gateways also simplify authorization from dynamic service addresses. Consul's intentions determine whether
-connections through the gateway are authorized. Then traditional tools like firewalls or IAM roles can authorize the
-connections from the known gateway nodes to the destination services.
-
-For more information about terminating gateways, review the [complete documentation](/consul/docs/connect/gateways/terminating-gateway)
-and the [terminating gateway tutorial](/consul/tutorials/developer-mesh/terminating-gateways-connect-external-services).
-
-
diff --git a/website/content/docs/connect/dataplane/consul-dataplane.mdx b/website/content/docs/connect/dataplane/consul-dataplane.mdx
deleted file mode 100644
index 4bbc9602a74f..000000000000
--- a/website/content/docs/connect/dataplane/consul-dataplane.mdx
+++ /dev/null
@@ -1,181 +0,0 @@
----
-layout: docs
-page_title: Consul Dataplane CLI Reference
-description: >-
- Consul Dataplane runs as a separate binary controlled with the `consul-dataplane` CLI command. Learn how to use this command to configure your dataplane on Kubernetes with this reference guide and example code.
----
-
-# Consul Dataplane CLI Reference
-
-The `consul-dataplane` command interacts with the binary for [simplified service mesh with Consul Dataplane](/consul/docs/connect/dataplane). Use this command to install Consul Dataplane, configure its Envoy proxies, and secure Dataplane deployments.
-
-## Usage
-
-Usage: `consul-dataplane [options]`
-
-### Requirements
-
-Consul Dataplane requires servers running Consul version `v1.14+`. To find a specific version of Consul, refer to [HashiCorp's Official Release Channels](https://www.hashicorp.com/official-release-channels).
-
-### Startup
-
-The following options are required when starting `consul-dataplane` with the CLI:
-
-
-
-
-- `-addresses`
-- `-service-node-name`
-- `-proxy-service-id`
-
-
-
-
-
-- `-addresses`
-- `-service-node-name`
-- `-service-namespace`
-- `-service-partition`
-- `-proxy-service-id`
-
-
-
-
-
-### Command Options
-
-- `-addresses` - Consul server gRPC addresses. Can be a DNS name or an executable command. Accepted environment variable is `DP_CONSUL_ADDRESSES`. Refer to [go-netaddrs](https://github.com/hashicorp/go-netaddrs#summary) for details and examples.
-- `-ca-certs` - The path to a file or directory containing CA certificates used to verify the server's certificate. Accepted environment variable is `DP_CA_CERTS`.
-- `-consul-dns-bind-addr` - The address bound to the Consul DNS proxy. Default is `"127.0.0.1"`. Accepted environment variable is `DP_CONSUL_DNS_BIND_ADDR`.
-- `-consul-dns-bind-port` - The port that the Consul DNS proxy listens on. Default is `-1`, which disables the DNS proxy. Accepted environment variable is `DP_CONSUL_DNS_BIND_PORT`.
-- `-credential-type` - The type of credentials used to authenticate with Consul servers, either `"static"` or `"login"`. Accepted environment variable is `DP_CREDENTIAL_TYPE`.
-- `-envoy-admin-bind-address` - The address the Envoy admin server is available on. Default is `"127.0.0.1"`. Accepted environment variable is `DP_ENVOY_ADMIN_BIND_ADDRESS`.
-- `-envoy-admin-bind-port` - The port the Envoy admin server is available on. Default is `19000`. Accepted environment variable is `DP_ENVOY_ADMIN_BIND_PORT`.
-- `-envoy-concurrency` - The number of worker threads that Envoy uses. Default is `2`. Accepted environment variable is `DP_ENVOY_CONCURRENCY`.
-- `-envoy-ready-bind-address` - The address Envoy's readiness probe is available on. Accepted environment variable is `DP_ENVOY_READY_BIND_ADDRESS`.
-- `-envoy-ready-bind-port` - The port Envoy's readiness probe is available on. Accepted environment variable is `DP_ENVOY_READY_BIND_PORT`.
-- `-graceful-port` - The port to serve HTTP endpoints for graceful operations. Accepted environment variable is `DP_GRACEFUL_PORT`.
-- `-graceful-shutdown-path` - The HTTP path to serve the graceful shutdown endpoint. Accepted environment variable is `DP_GRACEFUL_SHUTDOWN_PATH`.
-- `-grpc-port` - The Consul server gRPC port to which `consul-dataplane` connects. Default is `8502`. Accepted environment variable is `DP_CONSUL_GRPC_PORT`.
-- `-log-json` - Enables log messages in JSON format. Default is `false`. Accepted environment variable is `DP_LOG_JSON`.
-- `-log-level` - Log level of the messages to print. Available log levels are `"trace"`, `"debug"`, `"info"`, `"warn"`, and `"error"`. Default is `"info"`. Accepted environment variable is `DP_LOG_LEVEL`.
-- `-login-auth-method` - The auth method used to log in. Accepted environment variable is `DP_CREDENTIAL_LOGIN_AUTH_METHOD`.
-- `-login-bearer-token` - The bearer token presented to the auth method. Accepted environment variable is `DP_CREDENTIAL_LOGIN_BEARER_TOKEN`.
-- `-login-bearer-token-path` - The path to a file containing the bearer token presented to the auth method. Accepted environment variable is `DP_CREDENTIAL_LOGIN_BEARER_TOKEN_PATH`.
-- `-login-datacenter` - The datacenter containing the auth method. Accepted environment variable is `DP_CREDENTIAL_LOGIN_DATACENTER`.
-- `-login-meta` - A set of key/value pairs to attach to the ACL token. Each pair is formatted as `=`. This flag may be passed multiple times. Accepted environment variables are `DP_CREDENTIAL_LOGIN_META{1..9}`.
-- `-login-namespace` - The Consul Enterprise namespace containing the auth method. Accepted environment variable is `DP_CREDENTIAL_LOGIN_NAMESPACE`.
-- `-login-partition` - The Consul Enterprise partition containing the auth method. Accepted environment variable is `DP_CREDENTIAL_LOGIN_PARTITION`.
-- `-proxy-service-id` - The proxy service instance's ID. Accepted environment variable is `DP_PROXY_SERVICE_ID`.
-- `-proxy-service-id-path` - The path to a file containing the proxy service instance's ID. Accepted environment variable is `DP_PROXY_SERVICE_ID_PATH`.
-- `-server-watch-disabled` - Prevent `consul-dataplane` from consuming the server update stream. Use this flag when Consul servers are behind a load balancer. Default is `false`. Accepted environment variable is `DP_SERVER_WATCH_DISABLED`.
-- `-service-namespace` - The Consul Enterprise namespace in which the proxy service instance is registered. Accepted environment variable is `DP_SERVICE_NAMESPACE`.
-- `-service-node-id` - The ID of the Consul node to which the proxy service instance is registered. Accepted environment variable is `DP_SERVICE_NODE_ID`.
-- `-service-node-name` - The name of the Consul node to which the proxy service instance is registered. Accepted environment variable is `DP_SERVICE_NODE_NAME`.
-- `-service-partition` - The Consul Enterprise partition in which the proxy service instance is registered. Accepted environment variable is `DP_SERVICE_PARTITION`.
-- `-shutdown-drain-listeners` - Wait for proxy listeners to drain before terminating the proxy container. Accepted environment variable is `DP_SHUTDOWN_DRAIN_LISTENERS`.
-- `-shutdown-grace-period-seconds` - Amount of time to wait after receiving a SIGTERM signal before terminating the proxy. Accepted environment variable is `DP_SHUTDOWN_GRACE_PERIOD_SECONDS`.
-- `-static-token` - The ACL token used to authenticate requests to Consul servers when `-credential-type` is set to `"static"`. Accepted environment variable is `DP_CREDENTIAL_STATIC_TOKEN`.
-- `-telemetry-prom-ca-certs-path` - The path to a file or directory containing CA certificates used to verify the Prometheus server's certificate. Accepted environment variable is `DP_TELEMETRY_PROM_CA_CERTS_PATH`.
-- `-telemetry-prom-cert-file` - The path to the client certificate used to serve Prometheus metrics. Accepted environment variable is `DP_TELEMETRY_PROM_CERT_FILE`.
-- `-telemetry-prom-key-file` - The path to the client private key used to serve Prometheus metrics. Accepted environment variable is `DP_TELEMETRY_PROM_KEY_FILE`.
-- `-telemetry-prom-merge-port` - The local port used to serve merged Prometheus metrics. Default is `20100`. If your service instance uses the same default port, this flag must be set to a different port in order to avoid a port conflict. Accepted environment variable is `DP_TELEMETRY_PROM_MERGE_PORT`.
-- `-telemetry-prom-retention-time` - The duration for Prometheus metrics aggregation. Default is `1m0s`. Accepted environment variable is `DP_TELEMETRY_PROM_RETENTION_TIME`. Refer to [`prometheus_retention_time`](/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) for details on setting this value.
-- `-telemetry-prom-scrape-path` - The URL path where Envoy serves Prometheus metrics. Default is `"/metrics"`. Accepted environment variable is `DP_TELEMETRY_PROM_SCRAPE_PATH`.
-- `-telemetry-prom-service-metrics-url` - The URL where your service instance serves Prometheus metrics. If this is set, the metrics at this URL are included in Consul Dataplane's merged Prometheus metrics. Accepted environment variable is `DP_TELEMETRY_PROM_SERVICE_METRICS_URL`.
-- `-telemetry-use-central-config` - Controls whether the proxy applies the central telemetry configuration. Default is `true`. Accepted environment variable is `DP_TELEMETRY_USE_CENTRAL_CONFIG`.
-- `-tls-cert` - The path to a client certificate file. This flag is required if `tls.grpc.verify_incoming` is enabled on the server. Accepted environment variable is `DP_TLS_CERT`.
-- `-tls-disabled` - Communicate with Consul servers over a plaintext connection. Useful for testing, but not recommended for production. Default is `false`. Accepted environment variable is `DP_TLS_DISABLED`.
-- `-tls-insecure-skip-verify` - Do not verify the server's certificate. Useful for testing, but not recommended for production. Default is `false`. `DP_TLS_INSECURE_SKIP_VERIFY`.
-- `-tls-key` - The path to a client private key file. This flag is required if `tls.grpc.verify_incoming` is enabled on the server. Accepted environment variable is `DP_TLS_KEY`.
-- `-tls-server-name` - The hostname to expect in the server certificate's subject. This flag is required if `-addresses` is not a DNS name. Accepted environment variable is `DP_TLS_SERVER_NAME`.
-- `-version` - Print the current version of `consul-dataplane`.
-- `-xds-bind-addr` - The address the Envoy xDS server is available on. Default is `"127.0.0.1"`. Accepted environment variable is `DP_XDS_BIND_ADDR`.
-- `-xds-bind-port` - The port on which the Envoy xDS server is available. Default is `0`. When set to `0`, an available port is selected at random. Accepted environment variable is `DP_XDS_BIND_PORT`.
-
-## Examples
-
-### DNS
-
-Consul Dataplane resolves a domain name to discover Consul server IP addresses.
-
- ```shell-session
- $ consul-dataplane -addresses my.consul.example.com
- ```
-
-### Executable Command
-
-Consul Dataplane runs a script that, on success, returns one or more IP addresses separated by whitespace.
-
- ```shell-session
- $ ./my-script.sh
- 172.20.0.1
- 172.20.0.2
- 172.20.0.3
-
- $ consul-dataplane -addresses "exec=./my-script.sh"
- ```
-
-### Go Discover Nodes for Cloud Providers
-
-The [`go-discover`](https://github.com/hashicorp/go-discover) binary is included in the `hashicorp/consul-dataplane` image for use with this mode of server discovery, which functions in
- a way similar to [Cloud Auto-join](/consul/docs/install/cloud-auto-join). The
- following example demonstrates how to use the `go-discover` binary with Consul Dataplane.
-
- ```shell-session
- $ consul-dataplane -addresses "exec=discover -q addrs provider=aws region=us-west-2 tag_key=consul-server tag_value=true"
- ```
-
-### Static token
-
-A static ACL token is passed to Consul Dataplane.
-
- ```shell-session
- $ consul-dataplane -credential-type "static" -static-token "12345678-90ab-cdef-0000-12345678abcd"
- ```
-
-### Auth method login
-
-Consul Dataplane logs in to one of Consul's supported [auth methods](/consul/docs/security/acl/auth-methods).
-
-
-
-
- ```shell-session
- $ consul-dataplane -credential-type "login"
- -login-auth-method \
- -login-bearer-token \ ## Or -login-bearer-token-path
- -login-datacenter \
- -login-meta key1=val1 -login-meta key2=val2 \
- ```
-
-
-
-
-
- ```shell-session
- $ consul-dataplane -credential-type "login"
- -login-auth-method \
- -login-bearer-token \ ## Or -login-bearer-token-path
- -login-datacenter \
- -login-meta key1=val1 -login-meta key2=val2 \
- -login-namespace \
- -login-partition
- ```
-
-
-
-
-### Consul Servers Behind a Load Balancer
-
-When Consul servers are behind a load balancer, you must pass `-server-watch-disabled` to Consul
-Dataplane.
-
-```shell-session
-$ consul-dataplane -server-watch-disabled
-```
-
-By default, Consul Dataplane opens a server watch stream to a Consul server, which enables the server
-to inform Consul Dataplane of new or different Consul server addresses. However, if Consul Dataplane
-is connecting through a load balancer, then it must ignore the Consul server addresses that are
-returned from the server watch stream.
diff --git a/website/content/docs/connect/dataplane/index.mdx b/website/content/docs/connect/dataplane/index.mdx
deleted file mode 100644
index e7a386324759..000000000000
--- a/website/content/docs/connect/dataplane/index.mdx
+++ /dev/null
@@ -1,163 +0,0 @@
----
-layout: docs
-page_title: Simplified Service Mesh with Consul Dataplane
-description: >-
- Consul Dataplane removes the need to run a client agent for service discovery and service mesh by leveraging orchestrator functions. Learn about Consul Dataplane, how it can lower latency for Consul on Kubernetes and AWS ECS, and how it enables Consul support for AWS Fargate and GKE Autopilot.
----
-
-# Simplified Service Mesh with Consul Dataplane
-
-This topic provides an overview of Consul Dataplane, a lightweight process for managing Envoy proxies. Consul Dataplane removes the need to run client agents on every node in a cluster for service discovery and service mesh. Instead, Consul deploys sidecar proxies that provide lower latency, support additional runtimes, and integrate with cloud infrastructure providers.
-
-## Supported environments
-
-- Dataplanes can connect to Consul servers v1.14.0 and newer.
-- Dataplanes on Kubernetes requires Consul K8s v1.0.0 and newer.
-- Dataplanes on AWS Elastic Container Services (ECS) requires Consul ECS v0.7.0 and newer.
-
-## What is Consul Dataplane?
-
-When deployed to virtual machines or bare metal environments, the Consul control plane requires _server agents_ and _client agents_. Server agents maintain the service catalog and service mesh, including its security and consistency, while client agents manage communications between service instances, their sidecar proxies, and the servers. While this model is optimal for applications deployed on virtual machines or bare metal servers, orchestrators such as Kubernetes and ECS have native components that support health checking and service location functions typically provided by the client agent.
-
-Consul Dataplane manages Envoy proxies and leaves responsibility for other functions to the orchestrator. As a result, it removes the need to run client agents on every node. In addition, services no longer need to be reregistered to a local client agent after restarting a service instance, as a client agent’s lack of access to persistent data storage in container-orchestrated deployments is no longer an issue.
-
-The following diagram shows how Consul Dataplanes facilitate service mesh in a Kubernetes-orchestrated environment.
-
-
-
-### Impact on performance
-
-Consul Dataplanes replace node-level client agents and function as sidecars attached to each service instance. Dataplanes handle communication between Consul servers and Envoy proxies, using fewer resources than client agents. Consul servers need to consume additional resources in order to generate xDS resources for Envoy proxies.
-
-As a result, small deployments require fewer overall resources. For especially large deployments or deployments that expect to experience high levels of churn, consider the following impacts to your network's performance:
-
-1. In our internal tests, which used 5000 proxies and services flapping every 2 seconds, additional CPU utilization remained under 10% on the control plane.
-1. As you deploy more services, the resource usage for dataplanes grows on a linear scale.
-1. Envoy reconfigurations are rate limited to prevent excessive configuration changes from generating significant load on the servers.
-1. To avoid generating significant load on an individual server, proxy configuration is load balanced proactively.
-1. The frequency of the orchestrator's liveness and readiness probes determine how quickly Consul's control plane can become aware of failures. There is no impact on service mesh applications, however, as Envoy proxies have a passive ability to detect endpoint failure and steer traffic to healthy instances.
-
-## Benefits
-
-**Fewer networking requirements**: Without client agents, Consul does not require bidirectional network connectivity across multiple protocols to enable gossip communication. Instead, it requires a single gRPC connection to the Consul servers, which significantly simplifies requirements for the operator.
-
-**Simplified set up**: Because there are no client agents to engage in gossip, you do not have to generate and distribute a gossip encryption key to agents during the initial bootstrapping process. Securing agent communication also becomes simpler, with fewer tokens to track, distribute, and rotate.
-
-**Additional environment and runtime support**: Consul on Kubernetes versions _prior_ to v1.0 (Consul v1.14) require the use of hostPorts and DaemonSets for client agents, which limits Consul’s ability to be deployed in environments where those features are not supported.
-As of Consul on Kubernetes version 1.0 (Consul 1.14) with the new Consul Dataplane, `hostPorts` are no longer required and Consul now supports AWS Fargate and GKE Autopilot.
-
-**Easier upgrades**: With Consul Dataplane, updating Consul to a new version no longer requires upgrading client agents. Consul Dataplane also has better compatibility across Consul server versions, so the process to upgrade Consul servers becomes easier.
-
-## Get started
-
-To get started with Consul Dataplane, use the following reference resources:
-
-- For `consul-dataplane` commands and usage examples, including required flags for startup, refer to the [`consul-dataplane` CLI reference](/consul/docs/connect/dataplane/consul-dataplane).
-- For Helm chart information, refer to the [Helm Chart reference](/consul/docs/k8s/helm).
-- For Envoy, Consul, and Consul Dataplane version compatibility, refer to the [Envoy compatibility matrix](/consul/docs/connect/proxies/envoy).
-- For Consul on ECS workloads, refer to [Consul on AWS Elastic Container Service (ECS) Overview](/consul/docs/ecs).
-
-### Installation
-
-
-
-
-
-To install Consul Dataplane, set `VERSION` to `1.0.0` and then follow the instructions to install a specific version of Consul [with the Helm Chart](/consul/docs/k8s/installation/install#install-consul) or [with the Consul-k8s CLI](/consul/docs/k8s/installation/install-cli#install-a-previous-version).
-
-#### Helm
-
-```shell-session
-$ export VERSION=1.0.0
-$ helm install consul hashicorp/consul --set global.name=consul --version ${VERSION} --create-namespace --namespace consul
-```
-
-#### Consul-k8s CLI
-
-```shell-session
-$ export VERSION=1.0.0 && \
- curl --location "https://releases.hashicorp.com/consul-k8s/${VERSION}/consul-k8s_${VERSION}_darwin_amd64.zip" --output consul-k8s-cli.zip
-```
-
-
-
-
-Refer to the following documentation for Consul on ECS workloads:
-
-- [Deploy Consul with the Terraform module](/consul/docs/ecs/deploy/terraform)
-- [Deploy Consul manually](/consul/docs/ecs/deploy/manual)
-
-
-
-
-
-### Namespace ACL permissions
-
-If ACLs are enabled, exported services between partitions that use dataplanes may experience errors when you define namespace partitions with the `*` wildcard. Consul dataplanes use a token with the `builtin/service` policy attached, but this policy does not include access to all namespaces.
-
-Add the following policies to the service token attached to Consul dataplanes to grant Consul access to exported services across all namespaces:
-
-```hcl
-partition "default" {
- namespace "default" {
- query_prefix "" {
- policy = "read"
- }
- }
-}
-
-partition_prefix "" {
- namespace_prefix "" {
- node_prefix "" {
- policy = "read"
- }
- service_prefix "" {
- policy = "read"
- }
- }
-}
-```
-
-### Upgrading
-
-
-
-
-
-Before you upgrade Consul to a version that uses Consul Dataplane, you must edit your Helm chart so that client agents are removed from your deployments. Refer to [upgrading to Consul Dataplane](/consul/docs/k8s/upgrade#upgrading-to-consul-dataplanes) for more information.
-
-
-
-
-
-Refer to [Upgrade to dataplane architecture](/consul/docs/ecs/upgrade-to-dataplanes) for instructions.
-
-
-
-
-
-## Feature support
-
-Consul Dataplane on Kubernetes supports the following features:
-
-- Single and multi-cluster installations, including those with WAN federation, cluster peering, and admin partitions are supported.
-- Ingress, terminating, and mesh gateways are supported.
-- Running Consul service mesh in AWS Fargate and GKE Autopilot is supported.
-- xDS load balancing is supported.
-- Servers running in Kubernetes and servers external to Kubernetes are both supported.
-- HCP Consul Dedicated is supported.
-- Consul API Gateway
-
-Consul Dataplane on ECS support the following features:
-
-- Single and multi-cluster installations, including those with WAN federation, cluster peering, and admin partitions
-- Mesh gateways
-- Running Consul service mesh in AWS Fargate and EC2
-- xDS load balancing
-- Self-managed Enterprise and HCP Consul Dedicated servers
-
-### Technical Constraints
-
-- Consul Dataplane is not supported on Windows.
-- Consul Dataplane requires the `NET_BIND_SERVICE` capability. Refer to [Set capabilities for a Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container) in the Kubernetes Documentation for more information.
-- When ACLs are enabled, dataplanes use the [service token](/consul/docs/security/acl/tokens/create/create-a-service-token) and the `builtin/service` policy for their default permissions.
diff --git a/website/content/docs/connect/dataplane/telemetry.mdx b/website/content/docs/connect/dataplane/telemetry.mdx
deleted file mode 100644
index ce111e4872ba..000000000000
--- a/website/content/docs/connect/dataplane/telemetry.mdx
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: docs
-page_title: Consul Dataplane - Enable Telemetry Metrics
-description: >-
- Configure telemetry to collect metrics you can use to debug and observe Consul Dataplane behavior and performance.
----
-
-# Consul Dataplane Telemetry
-
-Consul Dataplane collects metrics about its own status and performance.
-The following external metrics stores are supported:
-
-- [DogstatsD](https://docs.datadoghq.com/developers/dogstatsd/)
-- [Prometheus](https://prometheus.io/docs/prometheus/latest/)
-- [StatsD](https://github.com/statsd/statsd)
-
-Consul Dataplane uses the same external metrics store that is configured for Envoy. To enable
-telemetry for Consul Dataplane, enable telemetry for Envoy by specifying an external metrics store
-in the proxy-defaults configuration entry or directly in the proxy.config field of the proxy service
-definition. Refer to the [Envoy bootstrap
-configuration](/consul/docs/connect/proxies/envoy#bootstrap-configuration) for details.
-
-## Prometheus Metrics Merging
-
-When Prometheus metrics are used, Consul Dataplane configures Envoy to serve merged metrics through
-a single endpoint. Metrics from the following sources are collected and merged:
-
-- Consul Dataplane
-- The Envoy process managed by Consul Dataplane
-- (optionally) Your service instance running alongside Consul Dataplane
-
-## Metrics Reference
-
-Consul Dataplane supports the following metrics:
-
-| Metric Name | Description | Unit | Type |
-| :------------------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------- | :------ |
-| `consul_dataplane.connect_duration` | Measures the time `consul-dataplane` spends connecting to a Consul server, including the time to discover Consul server addresses and to complete other setup prior to Envoy opening the xDS stream. | ms | timer |
-| `consul_dataplane.connected` | Indicates whether `consul-dataplane` is currently connected to a Consul server. | 1 or 0 | gauge |
-| `consul_dataplane.connection_errors` | Measures the number of errors encountered on gRPC streams. This is labeled with the gRPC error status code. | number of errors | gauge |
-| `consul_dataplane.discover_servers_duration` | Measures the time `consul-dataplane` spends discovering Consul server IP addresses. | ms | timer |
-| `consul_dataplane.envoy_connected` | Indicates whether Envoy is currently connected to `consul-dataplane` and able to receive xDS updates. | 1 or 0 | gauge |
-| `consul_dataplane.login_duration` | Measures the time `consul-dataplane` spends logging in to an ACL auth method. | ms | timer |
diff --git a/website/content/docs/connect/dev.mdx b/website/content/docs/connect/dev.mdx
deleted file mode 100644
index e31a2f423a61..000000000000
--- a/website/content/docs/connect/dev.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: docs
-page_title: Service Mesh Debugging
-description: >-
- Use the `consul connect proxy` command to connect to services or masquerade as other services for development and debugging purposes. Example code demonstrates connecting to services that are part of the service mesh as listeners only.
----
-
-# Service Mesh Debugging
-
-It is often necessary to connect to a service for development or debugging.
-If a service only exposes a service mesh listener, then we need a way to establish
-a mutual TLS connection to the service. The
-[`consul connect proxy` command](/consul/commands/connect/proxy) can be used
-for this task on any machine with access to a Consul agent (local or remote).
-
-Restricting access to services only via service mesh ensures that the only way to
-connect to a service is through valid authorization of the
-[intentions](/consul/docs/connect/intentions). This can extend to developers
-and operators, too.
-
-## Connecting to Mesh-only Services
-
-As an example, let's assume that we have a PostgreSQL database running that
-we want to connect to via `psql`, but the only non-loopback listener is
-via Connect. Let's also assume that we have an ACL token to identify as
-`operator-mitchellh`. We can start a local proxy:
-
-```shell-session
-$ consul connect proxy \
- -service operator-mitchellh \
- -upstream postgresql:8181
-```
-
-This works because the source `-service` does not need to be registered
-in the local Consul catalog. However, to retrieve a valid identifying
-certificate, the ACL token must have `service:write` permissions. This
-can be used as a sort of "debug service" to represent people, too. In
-the example above, the proxy is identifying as `operator-mitchellh`.
-
-With the proxy running, we can now use `psql` like normal:
-
-```shell-session
-$ psql --host=127.0.0.1 --port=8181 --username=mitchellh mydb
->
-```
-
-This `psql` session is now happening through our local proxy via an
-authorized mutual TLS connection to the PostgreSQL service in our Consul
-catalog.
-
-### Masquerading as a Service
-
-You can also easily masquerade as any source service by setting the
-`-service` value to any service. Note that the proper ACL permissions are
-required to perform this task.
-
-For example, if you have an ACL token that allows `service:write` for
-`web` and you want to connect to the `postgresql` service as "web", you
-can start a proxy like so:
-
-```shell-session
-$ consul connect proxy \
- -service web \
- -upstream postgresql:8181
-```
diff --git a/website/content/docs/connect/distributed-tracing.mdx b/website/content/docs/connect/distributed-tracing.mdx
deleted file mode 100644
index ffe5ef033bb6..000000000000
--- a/website/content/docs/connect/distributed-tracing.mdx
+++ /dev/null
@@ -1,265 +0,0 @@
----
-layout: docs
-page_title: Service Mesh Distributed Tracing
-description: >-
- Distributed tracing tracks the path of a request as it traverses the service mesh. Consul supports distributed tracing for applications that have it implemented. Learn how to integrate tracing libraries in your application and configure Consul to participate in that tracing.
----
-
-# Distributed Tracing
-
-Distributed tracing is a way to track and correlate requests across microservices. Distributed tracing must first
-be implemented in each application, it cannot be added by Consul. Once implemented in your applications, adding
-distributed tracing to Consul will add the sidecar proxies as spans in the request path.
-
-## Application Changes
-
-Consul alone cannot implement distributed tracing for your applications. Each application must propagate the required
-headers. Typically this is done using a tracing library such as:
-
-- https://github.com/opentracing/opentracing-go
-- https://github.com/DataDog/dd-trace-go
-- https://github.com/openzipkin/zipkin-go
-
-## Configuration
-
-Once your applications have been instrumented with a tracing library, you are ready to configure Consul to add sidecar
-proxy spans to the trace. Your eventual config will look something like:
-
-
-
-```hcl
-Kind = "proxy-defaults"
-Name = "global"
-Config {
- protocol = "http"
- envoy_tracing_json = <
-
--> **NOTE:** This example uses a [proxy defaults](/consul/docs/connect/config-entries/proxy-defaults) configuration entry, which applies to all proxies,
-but you can also apply the configuration in the
-[`proxy` block of your service configuration](/consul/docs/connect/proxies/proxy-config-reference#proxy-parameters). The proxy service registration is not supported on Kubernetes.
-
-Within the config there are two keys you need to customize:
-
-1. [`envoy_tracing_json`](/consul/docs/connect/proxies/envoy#envoy_tracing_json): Sets the tracing configuration for your specific tracing type.
- See the [Envoy tracers documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/trace/trace) for your
- specific collector's configuration. This configuration will reference the cluster name defined in `envoy_extra_static_clusters_json`.
-1. [`envoy_extra_static_clusters_json`](/consul/docs/connect/proxies/envoy#envoy_extra_static_clusters_json): Defines the address
- of your tracing collector where Envoy will send its spans. In this example the URL was `collector-url:9411`.
-
-## Applying the configuration
-
-This configuration only applies when proxies are _restarted_ since it changes the _bootstrap_ config for Envoy
-which can only be applied on startup. This means you must restart all your proxies for changes to this
-config to take effect.
-
--> **Note:** On Kubernetes this is a matter of restarting your deployments, e.g. `kubectl rollout restart deploy/deploy-name`.
-
-## Considerations
-
-1. Distributed tracing is only supported for HTTP and gRPC services. You must specify the protocol either globally
- via a proxy defaults config entry:
-
-
-
- ```hcl
- Kind = "proxy-defaults"
- Name = "global"
- Config {
- protocol = "http"
- }
- ```
-
- ```yaml
- apiVersion: consul.hashicorp.com/v1alpha1
- kind: ProxyDefaults
- metadata:
- name: global
- spec:
- config:
- protocol: http
- ```
-
- ```json
- {
- "Kind": "proxy-defaults",
- "Name": "global",
- "Config": {
- "protocol": "http"
- }
- }
- ```
-
-
-
- Or via a service defaults config entry for each service:
-
-
-
- ```hcl
- Kind = "service-defaults"
- Name = "service-name"
- Protocol = "http"
- ```
-
- ```yaml
- apiVersion: consul.hashicorp.com/v1alpha1
- kind: ServiceDefaults
- metadata:
- name: service-name
- spec:
- protocol: http
- ```
-
- ```json
- {
- "Kind": "service-defaults",
- "Name": "service-name",
- "Protocol": "http"
- }
- ```
-
-
-
-1. Requests through [Ingress Gateways](/consul/docs/connect/gateways/ingress-gateway) will not be traced unless the header
- `x-client-trace-id: 1` is set (see [hashicorp/consul#6645](https://github.com/hashicorp/consul/issues/6645)).
-
-1. Consul's proxies do not currently support [OpenTelemetry](https://opentelemetry.io/) spans, as Envoy has not
- [fully implemented](https://github.com/envoyproxy/envoy/issues/9958) it. Instead, you can add
- OpenTelemetry libraries to your application to emit spans for other
- [tracing protocols](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/observability/tracing)
- supported by Envoy, such as Zipkin or Jaeger.
-
-1. Tracing is only supported with Envoy proxies, not the built-in proxy.
-
-1. When configuring the Zipkin tracer in `envoy_tracing_json`, set [`trace_id_128bit`](https://www.envoyproxy.io/docs/envoy/v1.21.0/api-v3/config/trace/v3/zipkin.proto#envoy-v3-api-field-config-trace-v3-zipkinconfig-trace-id-128bit) to `true` if your application is configured to generate 128-bit trace IDs. For example:
-
-
-
- ```json
- {
- "http": {
- "name": "envoy.tracers.zipkin",
- "typedConfig": {
- "@type": "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig",
- "collector_cluster": "zipkin",
- "collector_endpoint_version": "HTTP_JSON",
- "collector_endpoint": "/api/v2/spans",
- "shared_span_context": false,
- "trace_id_128bit": true
- }
- }
- }
- ```
-
-
diff --git a/website/content/docs/connect/ecs.mdx b/website/content/docs/connect/ecs.mdx
new file mode 100644
index 000000000000..ac58fb713b0a
--- /dev/null
+++ b/website/content/docs/connect/ecs.mdx
@@ -0,0 +1,79 @@
+---
+layout: docs
+page_title: Connect ECS services with Consul
+description: >-
+ Consul documentation provides reference material for all features and options available in Consul.
+---
+
+# Connect ECS services with Consul
+
+This topic describes how to configure routes between tasks after registering the tasks to Consul service mesh.
+
+## Overview
+
+To enable tasks to call through the service mesh, complete the following steps:
+
+1. Configure the sidecar proxy to listen on a different port for each upstream service your application needs to call.
+1. Modify your application to make requests to the sidecar proxy on the specified port.
+
+## Requirements
+
+Consul service mesh must be deployed to ECS before you can bind a network address. For more information, refer to the following topics:
+
+- [Deploy Consul to ECS using the Terraform module](/consul/docs/deploy/server/ecs)
+- [Deploy Consul to ECS manually](/consul/docs/deploy/server/ecs/manual)
+
+## Configure the sidecar proxy
+
+Add the `upstreams` block to your application configuration and specify the following fields:
+
+- `destinationName`: Specifies the name of the upstream service as it is registered in the Consul service catalog.
+- `localBindPort`: Specifies the port that the proxy forwards requests to. You must specify an unused port but it does not need to match the upstream service port.
+
+In the following example, the route from an application named `web` to an application named `backend` goes through port `8080`:
+
+```hcl
+module "web" {
+ family = "web"
+ upstreams = [
+ {
+ destinationName = "backend"
+ localBindPort = 8080
+ }
+ ]
+}
+```
+
+You must include all upstream services in the `upstreams` configuration.
+
+## Configure your application
+
+Use an appropriate environment variable in your container definition to configure your application to call the upstream service at the loopback address.
+
+In the following example, the `web` application calls the `backend` service by sending requests to the
+`BACKEND_URL` environment variable:
+
+```hcl
+module "web" {
+ family = "web"
+ upstreams = [
+ {
+ destinationName = "backend"
+ localBindPort = 8080
+ }
+ ]
+ container_definitions = [
+ {
+ name = "web"
+ environment = [
+ {
+ name = "BACKEND_URL"
+ value = "http://localhost:8080"
+ }
+ ]
+ ...
+ }
+ ]
+ ...
+}
+```
\ No newline at end of file
diff --git a/website/content/docs/connect/enable.mdx b/website/content/docs/connect/enable.mdx
new file mode 100644
index 000000000000..41fc41be5f17
--- /dev/null
+++ b/website/content/docs/connect/enable.mdx
@@ -0,0 +1,86 @@
+---
+layout: docs
+page_title: Enable service mesh
+description: >-
+ Learn how to enable and configure Consul's service mesh capabilities in agent configurations.
+---
+
+# Enable service mesh
+
+This page describes the process to enable Consul's service mesh features.
+
+For more information about configurable options in the service mesh and the process to fully bootstrap Consul's service mesh, refer to [Connect services](/consul/docs/connect).
+
+## Enable mesh in server agent configuration
+
+Consul's service mesh features are not enabled by default when running Consul on virtual machines. To enable the service mesh, you must change the configuration of your Consul servers. You do not need to change client agent configurations in order to use the service mesh.
+
+To enable Consul's service mesh, set `connect.enabled` to `true` in a new or existing [agent configuration file](/consul/docs/reference/agent).
+
+Service mesh is enabled by default on Kubernetes deployments.
+
+
+
+
+
+```hcl
+connect {
+ enabled = true
+}
+```
+
+
+
+
+
+```json
+{
+ "connect": {
+ "enabled": true
+ }
+}
+```
+
+
+
+
+
+```yaml
+server:
+ connect:
+ enabled: true
+```
+
+
+
+
+
+## Apply configuration to Consul
+
+After you update your cluster's configuration, the Consul agent must restart before the service mesh is enabled.
+
+On VM deployments, restart each server in the cluster one at a time in order to maintain the cluster's availability.
+
+On Kubernetes deployments, you can run the following command to apply the configuration to your deployment:
+
+```shell-session
+$ kubectl apply -f values.yaml
+```
+
+If you use the Consul on Kubernetes CLI, you can run the following command instead:
+
+```shell-session
+$ consul-k8s upgrade -config-file values.yaml
+```
+
+For information about the `consul-k8s` CLI and how to install it, refer to [Install Consul on Kubernetes from Consul K8s CLI](/consul/docs/reference/cli/consul-k8s).
+
+## Next steps
+
+After you enable Consul's service mesh, enable the built-in certificate authority to ensure secure service-to-service communication and configure default settings for the Envoy proxies in the service mesh. You can also enable Consul's Access Control List (ACL) system to provide additional security.
+
+Refer to the following topics for more information:
+
+- [Bootstrap Consul's built-in CA](/consul/docs/secure-mesh/certificate/bootstrap)
+- [Configure proxy defaults](/consul/docs/connect/proxy)
+- [Enable Consul's ACL system](/consul/docs/secure/acl)
\ No newline at end of file
diff --git a/website/content/docs/connect/gateways/api-gateway/configuration/gateway.mdx b/website/content/docs/connect/gateways/api-gateway/configuration/gateway.mdx
deleted file mode 100644
index 2ea1d1e64eeb..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/configuration/gateway.mdx
+++ /dev/null
@@ -1,230 +0,0 @@
----
-layout: docs
-page_title: Gateway Resource Configuration
-description: >-
- Learn how to configure the `Gateway` resource to define how the Consul API Gateway handles incoming service mesh traffic with this configuration model and reference specifications.
----
-
-# Gateway Resource Configuration
-
-This topic provides full details about the `Gateway` resource.
-
-## Introduction
-
-A `Gateway` is an instance of network infrastructure that determines how service traffic should be handled. A `Gateway` contains one or more [`listeners`](#listeners) that bind to a set of IP addresses. An `HTTPRoute` or `TCPRoute` can then attach to a gateway listener to direct traffic from the gateway to a service.
-
-Gateway instances derive their configurations from the [`GatewayClass`](/consul/docs/connect/gateways/api-gateway/configuration/gatewayclass) resource, which acts as a template for individual `Gateway` deployments. Refer to [GatewayClass](/consul/docs/connect/gateways/api-gateway/configuration/gatewayclass) for additional information.
-
-Specify the following parameters to declare a `Gateway`:
-
-| Parameter | Description | Required |
-| :----------- |:---------------------------------------------------------------------------------------------------------------------------------------------------------- |:-------- |
-| `kind` | Specifies the type of configuration object. The value should always be `Gateway`. | Required |
-| `description` | Human-readable string that describes the purpose of the `Gateway`. | Optional |
-| `version` | Specifies the Kubernetes API version. The value should always be `gateway.networking.k8s.io/v1alpha2`. | Required |
-| `scope` | Specifies the effective scope of the Gateway. The value should always be `namespaced`. | Required |
-| `fields` | Specifies the configurations for the Gateway. The fields are listed in the [configuration model](#configuration-model). Details for each field are described in the [specification](#specification). | Required |
-
-
-## Configuration model
-
-The following outline shows how to format the configurations in the `Gateway` object. Click on a property name to view details about the configuration.
-
-* [`gatewayClassName`](#gatewayclassname): string | required
-* [`listeners`](#listeners): array of objects | required
- * [`allowedRoutes`](#listeners-allowedroutes): object | required
- * [`namespaces`](#listeners-allowedroutes-namespaces): object | required
- * [`from`](#listeners-allowedroutes-namespaces-from): string | required
- * [`selector`](#listeners-allowedroutes-namespaces-selector): object | required if `from` is configured to `selector`
- * [`matchExpressions`](#listeners-allowedroutes-namespaces-selector-matchexpressions): array of objects | required if `matchLabels` is not configured
- * [`key`](#listeners-allowedroutes-namespaces-selector-matchexpressions): string | required if `matchExpressions` is declared
- * [`operator`](#listeners-allowedroutes-namespaces-selector-matchexpressions): string | required if `matchExpressions` is declared
- * [`values`](#listeners-allowedroutes-namespaces-selector-matchexpressions): array of strings | required if `matchExpressions` is declared
- * [`matchLabels`](#listeners-allowedroutes-namespaces-selector-matchlabels): map of strings | required if `matchExpressions` is not configured
- * [`hostname`](#listeners-hostname): string | required
- * [`name`](#listeners-name): string | required
- * [`port`](#listeners-port): integer | required
- * [`protocol`](#listeners-protocol): string | required
- * [`tls`](#listeners-tls): object | required if `protocol` is set to `HTTPS`
- * [`certificateRefs`](#listeners-tls): array of objects | required if `tls` is declared
- * [`name`](#listeners-tls): string | required if `certificateRefs` is declared
- * [`namespace`](#listeners-tls): string | required if `certificateRefs` is declared
- * [`mode`](#listeners-tls): string | required if `certificateRefs` is declared
- * [`options`](#listeners-tls): map of strings | optional
-
-## Specification
-
-This topic provides details about the configuration parameters.
-
-### gatewayClassName
-Specifies the name of the [`GatewayClass`](/consul/docs/connect/gateways/api-gateway/configuration/gatewayclass) resource used for the `Gateway` instance. Unless you are using a custom [GatewayClass](/consul/docs/connect/gateways/api-gateway/configuration/gatewayclass), this value should be set to `consul`.
-* Type: string
-* Required: required
-
-### listeners
-Specifies the `listeners` associated with the `Gateway`. At least one `listener` must be specified. Each `listener` within a `Gateway` must have a unique combination of `hostname`, `port`, and `protocol`.
-* Type: array of objects
-* Required: required
-
-### listeners.allowedRoutes
-Specifies a `namespace` object that defines the types of routes that may be attached to a listener.
-* Type: object
-* Required: required
-
-### listeners.allowedRoutes.namespaces
-Determines which routes are allowed to attach to the `listener`. Only routes in the same namespace as the `Gateway` may be attached by default.
-* Type: object
-* Required: optional
-* Default: Same namespace as the parent Gateway
-
-### listeners.allowedRoutes.namespaces.from
-Determines which namespaces are allowed to attach a route to the `Gateway`. You can specify one of the following strings:
-
-* `All`: Routes in all namespaces may be attached to the `Gateway`.
-* `Same` (default): Only routes in the same namespace as the `Gateway` may be attached.
-* `Selector`: Only routes in namespaces that match the [`selector`](#listeners-allowedroutes-namespaces-selector) may be attached.
-
-This parameter is required.
-
-### listeners.allowedRoutes.namespaces.selector
-Specifies a method for selecting routes that are allowed to attach to the listener. The `Gateway` checks for namespaces in the network that match either a regular expression or a label. Routes from the matching namespace are allowed to attach to the listener.
-
-You can configure one of the following objects:
-
-* [`matchExpressions`](#listeners-allowedroutes-namespaces-selector-matchexpressions)
-* [`matchLabels`](#listeners-allowedroutes-namespaces-selector-matchlabels)
-
-This field is required when [`from`](#listeners-allowedroutes-namespaces-from) is configured to `Selector`.
-
-### listeners.allowedRoutes.namespaces.selector.matchExpressions
-Specifies an array of requirements for matching namespaces. If a match is found, then routes from the matching namespace(s) are allowed to attach to the `Gateway`. The following table describes members of the `matchExpressions` array:
-
-| Requirement | Description | Type | Required |
-|--- |--- |--- |--- |
-|`key` | Specifies the label that the `key` applies to. | string | required when `matchExpressions` is declared |
-|`operator` | Specifies the key's relation to a set of values. You can use the following keywords:
`In`: Only routes in namespaces that contain the strings in the `values` field can attach to the `Gateway`.
`NotIn`: Routes in namespaces that do not contain the strings in the `values` field can attach to the `Gateway`.
`Exists`: Routes in namespaces that contain the `key` value are allowed to attach to the `Gateway`.
`DoesNotExist`: Routes in namespaces that do not contain the `key` value are allowed to attach to the `Gateway`.
| string | required when `matchExpressions` is declared |
-|`values` | Specifies an array of string values. If `operator` is configured to `In` or `NotIn`, then the `values` array must contain values. If `operator` is configured to `Exists` or `DoesNotExist`, then the `values` array must be empty. | array of strings | required when `matchExpressions` is declared |
-
-In the following example, routes in namespaces that contain `foo` and `bar` are allowed to attach routes to the `Gateway`.
-```yaml
-namespaceSelector:
- matchExpressions:
- - key: kubernetes.io/metadata.name
- operator: In
- values:
- - foo
- - bar
-```
-
-Refer to [Labels and Selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#resources-that-support-set-based-requirements) in the Kubernetes documentation for additional information about `matchExpressions`.
-
-### listeners.allowedRoutes.namespaces.selector.matchLabels
-Specifies an array of labels and label values. If a match is found, then routes with the matching label(s) are allowed to attach to the `Gateway`. This selector can contain any arbitrary key/value pair.
-
-In the following example, routes in namespaces that have a `bar` label are allowed to attach to the `Gateway`.
-
-```yaml
-namespaceSelector:
- matchLabels:
- foo: bar
-```
-
-Refer to [Labels and Selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) in the Kubernetes documentation for additional information about labels.
-
-### listeners.hostname
-Specifies the `listener`'s hostname.
-* Type: string
-* Required: required
-
-### listeners.name
-Specifies the `listener`'s name.
-* Type: string
-* Required: required
-
-### listeners.port
-Specifies the port number that the `listener` attaches to.
-* Type: integer
-* Required: required
-
-### listeners.protocol
-Specifies the protocol the `listener` communicates on.
-* Type: string
-* Required: required
-
-Allowed values are `TCP`, `HTTP`, or `HTTPS`.
-
-### listeners.tls
-Specifies the `tls` configurations for the `Gateway`. The `tls` object is required if `protocol` is set to `HTTPS`. The object contains the following fields:
-
-| Parameter | Description | Type | Required |
-| --- | --- | --- | --- |
-| `certificateRefs` |
Specifies Kubernetes `name` and `namespace` objects that contains TLS certificates and private keys. The certificates establish a TLS handshake for requests that match the `hostname` of the associated `listener`. Each reference must be a Kubernetes Secret. If you are using a Secret in a namespace other than the `Gateway`'s, each reference must also have a corresponding [`ReferenceGrant`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferenceGrant).
| Object or array | Required if `tls` is set |
-| `mode` | Specifies the TLS Mode. Should always be set to `Terminate` for `HTTPRoutes` | string | Required if `certificateRefs` is set |
-| `options` | Specifies additional Consul API Gateway options. | Map of strings | optional |
-
-The following keys for `options` are available
-* `api-gateway.consul.hashicorp.com/tls_min_version`
-* `api-gateway.consul.hashicorp.com/tls_max_version`
-* `api-gateway.consul.hashicorp.com/tls_cipher_suites`
-
-In the following example, `tls` settings are configured to use a secret named `consul-server-cert` in the same namespace as the `Gateway` and the minimum tls version is set to `TLSv1_2`.
-
-```yaml
-
-tls:
- certificateRefs:
- - name: consul-server-cert
- group: ""
- kind: Secret
- mode: Terminate
- options:
- api-gateway.consul.hashicorp.com/tls_min_version: "TLSv1_2"
-
-```
-
-#### Example cross-namespace certificateRef
-
-The following example creates a `Gateway` named `example-gateway` in namespace `gateway-namespace` (lines 2-4). The gateway has a `certificateRef` in namespace `secret-namespace` (lines 16-18). The reference is allowed because the `ReferenceGrant` configuration, named `reference-grant` in namespace `secret-namespace` (lines 24-27), allows `Gateways` in `gateway-namespace` to reference `Secrets` in `secret-namespace` (lines 31-35).
-
-
-
- ```yaml
- apiVersion: gateway.networking.k8s.io/v1beta1
- kind: Gateway
- metadata:
- name: example-gateway
- namespace: gateway-namespace
- spec:
- gatewayClassName: consul
- listeners:
- - protocol: HTTPS
- port: 443
- name: https
- allowedRoutes:
- namespaces:
- from: Same
- tls:
- certificateRefs:
- - name: cert
- namespace: secret-namespace
- group: ""
- kind: Secret
- ---
-
- apiVersion: gateway.networking.k8s.io/v1alpha2
- kind: ReferenceGrant
- metadata:
- name: reference-grant
- namespace: secret-namespace
- spec:
- from:
- - group: gateway.networking.k8s.io
- kind: Gateway
- namespace: gateway-namespace
- to:
- - group: ""
- kind: Secret
- name: cert
- ```
-
-
diff --git a/website/content/docs/connect/gateways/api-gateway/configuration/index.mdx b/website/content/docs/connect/gateways/api-gateway/configuration/index.mdx
deleted file mode 100644
index f6fc99d05de8..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/configuration/index.mdx
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: docs
-page_title: Consul API gateway configuration overview
-description: >-
- Configure your Consul API Gateway to manage traffic into your service mesh. Learn about the Kubernetes Gateway Specification items you can configure and how to configure custom API Gateways.
----
-
-# Consul API gateway configuration overview
-
-This topic provides an overview of the configuration items you can use to create API gateways, configure listeners, define routes, and apply additional resources that may be necessary to operate Consul API gateways in your environment.
-
-## Configurations for virtual machines
-
-Apply the following configuration items if your network runs on virtual machine nodes:
-
-| Configuration | Description | Usage |
-| --- | --- | --- |
-| [`api-gateway`](/consul/docs/connect/config-entries/api-gateway) | Defines the main infrastructure resource for declaring an API gateway and listeners on the gateway. | [Deploy API gateway listeners on virtual machines](/consul/docs/connect/gateways/api-gateway/deploy/listeners-vms) |
-| [`http-route`](/consul/docs/connect/config-entries/http-route) | Enables HTTP traffic to reach services in the mesh from a listener on the gateway.| [Define routes on virtual machines](/consul/docs/connect/gateways/api-gateway/define-routes/routes-vms) |
-| [`tcp-route`](/consul/docs/connect/config-entries/tcp-route) | Enables TCP traffic to reach services in the mesh from a listener on the gateway.| [Define routes on virtual machines](/consul/docs/connect/gateways/api-gateway/define-routes/routes-vms) |
-| [`file-system-certificate`](/consul/docs/connect/config-entries/file-system-certificate) | Provides gateway with a CA certificate so that requests between the user and the gateway endpoint are encrypted. | [Encrypt API gateway traffic on virtual machines](/consul/docs/connect/gateways/api-gateway/secure-traffic/encrypt-vms) |
-| [`inline-certificate`](/consul/docs/connect/config-entries/inline-certificate) | Provides gateway with a CA certificate so that requests between the user and the gateway endpoint are encrypted. | [Encrypt API gateway traffic on virtual machines](/consul/docs/connect/gateways/api-gateway/secure-traffic/encrypt-vms) |
-| [`service-intentions`](/consul/docs/connect/config-entries/service-intentions) | Specifies traffic communication rules between services in the mesh. Intentions also enforce rules for service-to-service traffic routed through a Consul API gateway. | General configuration for securing a service mesh |
-
-## Configurations for Kubernetes
-
-Apply the following configuration items if your network runs on Kubernetes:
-
-| Configuration | Description | Usage |
-| --- | --- | --- |
-| [`Gateway`](/consul/docs/connect/gateways/api-gateway/configuration/gateway) | Defines the main infrastructure resource for declaring an API gateway and listeners on the gateway. It also specifies the name of the `GatewayClass`. | [Deploy listeners on Kubernetes](/consul/docs/connect/gateways/api-gateway/deploy/listeners-k8s) |
-| [`GatewayClass`](/consul/docs/connect/gateways/api-gateway/configuration/gatewayclass) | Defines a class of gateway resources used as a template for creating gateways. The default gateway class is `consul` and is suitable for most API gateway implementations. | [Deploy listeners on Kubernetes](/consul/docs/connect/gateways/api-gateway/deploy/listeners-k8s) |
-| [`GatewayClassConfig`](/consul/docs/connect/gateways/api-gateway/configuration/gatewayclassconfig) | Describes additional gateway-related configuration parameters for the `GatewayClass` resource. | [Deploy listeners on Kubernetes](/consul/docs/connect/gateways/api-gateway/deploy/listeners-k8s) |
-| [`Routes`](/consul/docs/connect/gateways/api-gateway/configuration/routes) | Specifies paths from the gateway listener to backend services. | [Define routes on Kubernetes](/consul/docs/connect/gateways/api-gateway/define-routes/routes-k8s)
[Reroute traffic in Kubernetes](/consul/docs/connect/gateways/api-gateway/define-routes/reroute-http-requests)
[Route traffic to peered services in Kubernetes](/consul/docs/connect/gateways/api-gateway/define-routes/route-to-peered-services)
|
-| [`MeshServices`](/consul/docs/connect/gateways/api-gateway/configuration/meshservices) | Enables routes to reference services in Consul. | [Route traffic to peered services in Kubernetes](/consul/docs/connect/gateways/api-gateway/define-routes/route-to-peered-services) |
-| [`ServiceIntentions`](/consul/docs/connect/config-entries/service-intentions) | Specifies traffic communication rules between services in the mesh. Intentions also enforce rules for service-to-service traffic routed through a Consul API gateway. | General configuration for securing a service mesh |
-
-
diff --git a/website/content/docs/connect/gateways/api-gateway/define-routes/reroute-http-requests.mdx b/website/content/docs/connect/gateways/api-gateway/define-routes/reroute-http-requests.mdx
deleted file mode 100644
index 5a56aeee792a..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/define-routes/reroute-http-requests.mdx
+++ /dev/null
@@ -1,58 +0,0 @@
----
-layout: docs
-page_title: Reroute HTTP Requests
-description: >-
- Learn how to configure Consul API Gateway to reroute HTTP requests to a specific path.
----
-
-# Reroute HTTP Requests
-
-This topic describes how to configure Consul API Gateway to reroute HTTP requests.
-
-## Requirements
-
-1. Verify that the [requirements](/consul/docs/api-gateway/tech-specs) have been met.
-1. Verify that the Consul API Gateway CRDs and controller have been installed and applied. Refer to [Installation](/consul/docs/connect/gateways/api-gateway/deploy/install-k8s) for details.
-
-## Configuration
-
-Specify the following fields in your `Route` configuration. Refer to the [Route configuration reference](/consul/docs/connect/gateways/api-gateway/configuration/routes) for details about the parameters.
-
-- [`rules.filters.type`](/consul/docs/connect/gateways/api-gateway/configuration/routes#rules-filters-type): Set this parameter to `URLRewrite` to instruct Consul API Gateway to rewrite the URL when specific conditions are met.
-- [`rules.filters.urlRewrite`](/consul/docs/connect/gateways/api-gateway/configuration/routes#rules-filters-urlrewrite): Specify the `path` configuration.
-- [`rules.filters.urlRewrite.path`](/consul/docs/connect/gateways/api-gateway/configuration/routes#rules-filters-urlrewrite-path): Contains the paths that incoming requests should be rewritten to based on the match conditions.
-
-To configure the route to accept paths with or without a trailing slash, you must make two separate routes to handle each case.
-
-### Example
-
-In the following example, requests to `/incoming-request-prefix/` are forwarded to the `backendRef` as `/prefix-backend-receives/`. As a result, requests to `/incoming-request-prefix/request-path` are received by `backendRef` as `/prefix-backend-receives/request-path`.
-
-
-
-```yaml hideClipboard
-apiVersion: gateway.networking.k8s.io/v1beta1
-kind: HTTPRoute
-metadata:
- name: example-route
- ##...
-spec:
- parentRefs:
- - group: gateway.networking.k8s.io
- kind: Gateway
- name: api-gateway
- rules:
- - backendRefs:
- . . .
- filters:
- - type: URLRewrite
- urlRewrite:
- path:
- replacePrefixMatch: /prefix-backend-receives/
- type: ReplacePrefixMatch
- matches:
- - path:
- type: PathPrefix
- value: /incoming-request-prefix/
-```
-
\ No newline at end of file
diff --git a/website/content/docs/connect/gateways/api-gateway/define-routes/route-to-peered-services.mdx b/website/content/docs/connect/gateways/api-gateway/define-routes/route-to-peered-services.mdx
deleted file mode 100644
index e323f8ea9e37..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/define-routes/route-to-peered-services.mdx
+++ /dev/null
@@ -1,76 +0,0 @@
----
-page_title: Route Traffic to Peered Services
-description: Learn how to configure Consul API Gateway to route traffic to services connected to the mesh through a peering connection.
----
-
-# Route Traffic to Peered Services
-
-This topic describes how to configure Consul API Gateway to route traffic to services connected to the mesh through a cluster peering connection.
-
-## Requirements
-
-- Consul v1.14 or later
-- Verify that the [requirements](/consul/docs/api-gateway/tech-specs) have been met.
-- Verify that the Consul API Gateway CRDs and controller have been installed and applied. Refer to [Installation](/consul/docs/connect/gateways/api-gateway/deploy/install-k8s) for details.
-- A peering connection must already be established between Consul clusters. Refer to [Cluster Peering on Kubernetes](/consul/docs/k8s/connect/cluster-peering/tech-specs) for instructions.
-- The Consul service that you want to route traffic to must be exported to the cluster containing your `Gateway`. Refer to [Cluster Peering on Kubernetes](/consul/docs/k8s/connect/cluster-peering/tech-specs) for instructions.
-- A `ServiceResolver` for the Consul service you want to route traffic to must be created in the cluster that contains your `Gateway`. Refer to [Service Resolver Configuration Entry](/consul/docs/connect/config-entries/service-resolver) for instructions.
-
-## Configuration
-
-Specify the following fields in your `MeshService` configuration to use this feature. Refer to the [MeshService configuration reference](/consul/docs/connect/gateways/api-gateway/configuration/meshservice) for details about the parameters.
-
-- [`name`](/consul/docs/connect/gateways/api-gateway/configuration/meshservice#name)
-- [`peer`](/consul/docs/connect/gateways/api-gateway/configuration/meshservice#peer)
-
-## Example
-
-In the following example, routes that use `example-mesh-service` as a backend are configured to send requests to the `echo` service exported by the peered Consul cluster `cluster-02`.
-
-
-
-```yaml hideClipboard
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: ServiceResolver
-metadata:
- name: echo
-spec:
- redirect:
- peer: cluster-02
- service: echo
-```
-
-
-
-
-```yaml hideClipboard
-apiVersion: api-gateway.consul.hashicorp.com/v1alpha1
-kind: MeshService
-metadata:
- name: example-mesh-service
-spec:
- name: echo
- peer: cluster-02
-```
-
-
-After applying the `meshservice.yaml` configuration, an `HTTPRoute` may then reference `example-mesh-service` as its `backendRef`.
-
-
-
-```yaml hideClipboard
-apiVersion: gateway.networking.k8s.io/v1beta1
-kind: HTTPRoute
-metadata:
- name: example-route
-spec:
- ...
- rules:
- - backendRefs:
- - group: consul.hashicorp.com
- kind: MeshService
- name: example-mesh-service
- port: 3000
- ...
-```
-
diff --git a/website/content/docs/connect/gateways/api-gateway/define-routes/routes-k8s.mdx b/website/content/docs/connect/gateways/api-gateway/define-routes/routes-k8s.mdx
deleted file mode 100644
index 13413e82bd36..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/define-routes/routes-k8s.mdx
+++ /dev/null
@@ -1,68 +0,0 @@
----
-layout: docs
-page_title: Define API gateway routes on Kubernetes
-description: Learn how to define and attach HTTP and TCP routes to Consul API gateway listeners in Kubernetes-orchestrated networks.
----
-
-# Define API gateway routes on Kubernetes
-
-This topic describes how to configure HTTP and TCP routes and attach them to Consul API gateway listeners in Kubernetes-orchestrated networks. Routes are rule-based configurations that allow external clients to send requests to services in the mesh. For information about route configuration parameters, refer to the [`Routes` configuration reference](/consul/docs/connect/gateways/api-gateway/configuration/routes).
-
-## Overview
-
-The following steps describe the general workflow for defining and deploying routes:
-
-1. Define a route configuration that specifies the protocol type, name of the gateway to attach to, and rules for routing requests.
-1. Deploy the configuration to create the routes and attach them to the gateway.
-
-Routes and the gateways they are attached to are eventually-consistent objects. They provide feedback about their current state through a series of status conditions. As a result, you must manually check the route status to determine if the route successfully bound to the gateway.
-
-## Requirements
-
-Verify that your environment meets the requirements specified in [Technical specifications for Kubernetes](/consul/docs/connect/gateways/api-gateway/tech-specs).
-
-### OpenShift
-
-If your Kubernetes-orchestrated network runs on OpenShift, verify that OpenShift is enabled for your Consul installation. Refer to [OpenShift requirements](/consul/docs/connect/gateways/api-gateway/tech-specs#openshift-requirements) for additional information.
-
-## Define routes
-
-Define route configurations and bind them to listeners configured on the gateway so that Consul can route incoming requests to services in the mesh.
-
-1. Create a configuration file and specify the following fields:
-
- - `apiVersion`: Specifies the Kubernetes API gateway version. This must be set to `gateway.networking.k8s.io/v1beta1`
- - `kind`: Set to `HTTPRoute` or `TCPRoute`.
- - `metadata.name`: Specify a name for the route. The name is metadata that you can use to reference the configuration when performing Consul operations.
- - `spec.parentRefs.name`: Specifies a list of API gateways that the route binds to.
- - `spec.rules`: Specifies a list of routing rules for constructing a routing table that maps listeners to services.
-
- Refer to the [`Routes` configuration reference](/consul/docs/connect/gateways/api-gateway/configuration/routes) for details about configuring route rules.
-
-1. Configure any additional fields necessary for your use case, such as the namespace or admin partition.
-1. Save the configuration.
-
-The following example creates a route named `example-route` associated with a listener defined in `example-gateway`.
-
-```yaml
-apiVersion: gateway.networking.k8s.io/v1beta1
-kind: HTTPRoute
-metadata:
- name: example-route
-spec:
- parentRefs:
- - name: example-gateway
- rules:
- - backendRefs:
- - kind: Service
- name: echo
- port: 8080
-```
-
-## Deploy the route configuration
-
-Apply the configuration to your cluster using the `kubectl` command. The following command applies the configuration to the `consul` namespace:
-
-```shell-session
-$ kubectl apply -f my-route.yaml -n consul
-```
diff --git a/website/content/docs/connect/gateways/api-gateway/define-routes/routes-vms.mdx b/website/content/docs/connect/gateways/api-gateway/define-routes/routes-vms.mdx
deleted file mode 100644
index 5fe459d2062b..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/define-routes/routes-vms.mdx
+++ /dev/null
@@ -1,121 +0,0 @@
----
-layout: docs
-page_title: Define API gateway routes on virtual machines
-description: Learn how to define and attach HTTP and TCP routes to Consul API gateway listeners so that requests from external clients can reach services in the mesh.
----
-
-# Define API gateway routes on virtual machines
-
-This topic describes how to configure HTTP and TCP routes and attach them to Consul API gateway listeners. Routes are rule-based configurations that allow external clients to send requests to services in the mesh.
-
-## Overview
-
-The following steps describe the general workflow for defining and deploying routes:
-
-1. Define routes in an HTTP or TCP configuration entry. The configuration entry includes rules for routing requests, target services in the mesh for the traffic, and the name of the gateway to attach to.
-1. Deploy the configuration entry to create the routes and attach them to the gateway.
-
-Routes and the gateways they are attached to are eventually-consistent objects. They provide feedback about their current state through a series of status conditions. As a result, you must manually check the route status to determine if the route is bound to the gateway successfully.
-
-## Requirements
-
-The following requirements must be satisfied to use API gateways on VMs:
-
-- Consul 1.15 or later
-- A Consul cluster with service mesh enabled. Refer to [`connect`](/consul/docs/agent/config/config-files#connect)
-- Network connectivity between the machine deploying the API Gateway and a Consul cluster agent or server
-
-### ACL requirements
-
-If ACLs are enabled, you must present a token with the following permissions to
-configure Consul and deploy API gateway routes:
-
-- `mesh: read`
-- `mesh: write`
-
-Refer to [Mesh Rules](/consul/docs/security/acl/acl-rules#mesh-rules) for
-additional information about configuring policies that enable you to interact
-with Consul API gateway configurations.
-
-## Define the routes
-
-Define route configurations and bind them to listeners configured on the gateway so that Consul can route incoming requests to services in the mesh.
-
-1. Create a route configuration entry file and specify the following settings:
- - `Kind`: Set to `http` or `tcp`.
- - `Name`: Specify a name for the route. The name is metadata that you can use to reference the configuration when performing Consul operations.
- - `Parents`: Specifies a list of API gateways that the route binds to.
- - `Rules`: If you are configuring HTTP routes, define a list of routing rules for constructing a routing table that maps listeners to services. Each member of the list is a map that may contain the following fields:
- - `Filters`
- - `Matches`
- - `Services`
-
- Refer to the [HTTP route configuration entry](/consul/docs/connect/config-entries/http-route) and [TCP route configuration entry](/consul/docs/connect/config-entries/tcp-route) reference for details about configuring routes.
-
-1. Configure any additional fields necessary for your use case, such as the namespace or admin partition.
-1. Save the configuration.
-
-
-The following example routes requests from the listener on the API gateway at port `8443` to services in Consul based on the path of the request. When an incoming request starts at path `/`, Consul forwards 90 percent of the requests to the `ui` service and 10 percent to `experimental-ui`. Consul also forwards requests starting with `/api` to `api`.
-
-```hcl
-Kind = "http-route"
-Name = "my-http-route"
-
-// Rules define how requests will be routed
-Rules = [
- // Send all requests to UI services with 10% going to the "experimental" UI
- {
- Matches = [
- {
- Path = {
- Match = "prefix"
- Value = "/"
- }
- }
- ]
- Services = [
- {
- Name = "ui"
- Weight = 90
- },
- {
- Name = "experimental-ui"
- Weight = 10
- }
- ]
- },
- // Send all requests that start with the path `/api` to the API service
- {
- Matches = [
- {
- Path = {
- Match = "prefix"
- Value = "/api"
- }
- }
- ]
- Services = [
- {
- Name = "api"
- }
- ]
- }
-]
-
-Parents = [
- {
- Kind = "api-gateway"
- Name = "my-gateway"
- SectionName = "my-http-listener"
- }
-]
-```
-
-## Deploy the route configuration
-
-Run the `consul config write` command to attach the routes to the specified gateways. The following example writes a configuration called `my-http-route.hcl`:
-
-```shell-session
-$ consul config write my-http-route.hcl
-```
\ No newline at end of file
diff --git a/website/content/docs/connect/gateways/api-gateway/deploy/listeners-k8s.mdx b/website/content/docs/connect/gateways/api-gateway/deploy/listeners-k8s.mdx
deleted file mode 100644
index 7ce6ed9c5002..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/deploy/listeners-k8s.mdx
+++ /dev/null
@@ -1,74 +0,0 @@
----
-layout: docs
-page_title: Deploy API gateway listeners in Kubernetes
-description: >-
- Learn how to create API gateway configurations in Kubernetes that enable you to instantiate gateway instances.
----
-
-# Deploy API gateway listeners in Kubernetes
-
-This topic describes how to deploy Consul API gateway listeners to Kubernetes-orchestrated environments. If you want to implement API gateway listeners on VMs, refer to [Deploy API gateway listeners to virtual machines](/consul/docs/connect/gateways/api-gateway/deploy/listeners-vms).
-
-## Overview
-
-API gateways have one or more listeners that serve as ingress points for requests to services in a Consul service mesh. Create an [API gateway configuration](/consul/docs/connect/gateways/api-gateway/configuration/gateway) and define listeners that expose ports on the endpoint for ingress. Apply the configuration to direct Kubernetes to start API gateway services.
-
-### Routes
-
-After deploying the gateway, attach HTTP or TCP [routes](/consul/docs/connect/gateways/api-gateway/configuration/routes) to listeners defined in the gateway to control how requests route to services in the network.
-
-### Intentions
-
-Configure Consul intentions to allow or prevent traffic between gateway listeners and services in the mesh. Refer to [Service intentions](/consul/docs/connect/intentions) for additional information.
-
-
-## Requirements
-
-1. Verify that your environment meets the requirements specified in [Technical specifications for Kubernetes](/consul/docs/connect/gateways/api-gateway/tech-specs).
-1. Verify that the Consul API Gateway CRDs were applied. Refer to [Installation](/consul/docs/connect/gateways/api-gateway/install-k8s) for details.
-1. If your Kubernetes-orchestrated network runs on OpenShift, verify that OpenShift is enabled for your Consul installation. Refer to [OpenShift requirements](/consul/docs/connect/gateways/api-gateway/tech-specs#openshift-requirements) for additional information.
-
-## Define the gateway and listeners
-
-Create an API gateway values file that defines the gateway and listeners.
-
-1. Specify the following fields:
- - `apiVersion`: Specifies the Kubernetes gateway API version. Must be `gateway.networking.k8s.io/v1beta1`.
- - `kind`: Specifies the type of configuration entry to implement. This must be `Gateway`.
- - `metadata.name`: Specify a name for the gateway configuration. The name is metadata that you can use to reference the configuration when performing Consul operations.
- - `spec.gatewayClassName`: Specify the name of a `gatewayClass` configuration. Gateway classes are template-like resources in Kubernetes for instantiating gateway services. Specify `consul` to use the default gateway class shipped with Consul. Refer to the [GatewayClass configuration reference](/consul/docs/connect/gateways/api-gateway/configuration/gatewayclass) for additional information.
- - `spec.listeners`: Specify a list of listener configurations. Each listener is a map containing the following fields:
- - `port`: Specifies the port that the listener receives traffic on.
- - `name`: Specifies a unique name for the listener.
- - `protocol`: You can set either `tcp` or `http`
- - `allowedRoutes.namespaces`: Contains configurations for determining which namespaces are allowed to attach a route to the listener.
-1. Configure any additional fields necessary for your use case, such as the namespace or admin partition. Refer to the [API gateway configuration entry reference](/consul/docs/connect/gateways/api-gateway/configuration/gateway) for additional information.
-1. Save the configuration.
-
-In the following example, the API gateway specifies an HTTP listener on port `80`:
-
-```yaml
-apiVersion: gateway.networking.k8s.io/v1beta1
-kind: Gateway
-metadata:
- name: my-gateway
- namespace: consul
-spec:
- gatewayClassName: consul
- listeners:
- - protocol: HTTP
- port: 80
- name: http
- allowedRoutes:
- namespaces:
- from: "All"
-```
-
-
-## Deploy the API gateway and listeners
-
-Apply the configuration to your cluster using the `kubectl` command. The following command applies the configuration to the `consul` namespace:
-
-```shell-session
-$ kubectl apply -f my-gateway.yaml -n consul
-```
\ No newline at end of file
diff --git a/website/content/docs/connect/gateways/api-gateway/deploy/listeners-vms.mdx b/website/content/docs/connect/gateways/api-gateway/deploy/listeners-vms.mdx
deleted file mode 100644
index 1a868ca85703..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/deploy/listeners-vms.mdx
+++ /dev/null
@@ -1,113 +0,0 @@
----
-layout: docs
-page_title: Deploy API gateway listeners to virtual machines
-description: Learn how to configure and Consul API gateways and gateway listeners on virtual machines so that you can enable ingress requests to services in your service mesh in VM environments.
----
-
-# Deploy API gateway listeners to virtual machines
-
-This topic describes how to deploy Consul API gateway listeners to networks that operate in virtual machine (VM) environments. If you want to implement API gateway listeners in a Kubernetes environment, refer to [Deploy API gateway listeners to Kubernetes](/consul/docs/connect/gateways/api-gateway/deploy/listeners-k8s).
-
-## Overview
-
-API gateways have one or more listeners that serve as ingress points for requests to services in a Consul service mesh. Create an [API gateway configuration entry](/consul/docs/connect/config-entries/api-gateway) and define listeners that expose ports on the endpoint for ingress.
-
-The following steps describe the general workflow for deploying a Consul API gateway to a VM environment:
-
-1. Create an API gateway configuration entry. The configuration entry includes listener configurations and references to TLS certificates.
-1. Deploy the API gateway configuration entry to create the listeners.
-
-### Encryption
-
-To encrypt traffic between the external client and the service that the API gateway routes traffic to, define an inline certificate configuration and attach it to your listeners. Refer to [Encrypt API gateway traffic on virtual machines](/consul/docs/connect/gateways/api-gateway/secure-traffic/encrypt-vms) for additional information.
-
-### Routes
-
-After deploying the gateway, attach [HTTP](/consul/docs/connect/config-entries/http-route) routes and [TCP](/consul/docs/connect/config-entries/tcp-route) routes to listeners defined in the gateway to control how requests route to services in the network. Refer to [Define API gateway routes on VMs](/consul/docs/connect/gateways/api-gateway/define-routes/routes-vms) for additional information.
-
-## Requirements
-
-The following requirements must be satisfied to use API gateways on VMs:
-
-- Consul 1.15 or later
-- A Consul cluster with service mesh enabled. Refer to [`connect`](/consul/docs/agent/config/config-files#connect)
-- Network connectivity between the machine deploying the API Gateway and a
- Consul cluster agent or server
-
-### ACL requirements
-
-If ACLs are enabled, you must present a token with the following permissions to
-configure Consul and deploy API gateways:
-
-- `mesh: read`
-- `mesh: write`
-
-Refer to [Mesh Rules](/consul/docs/security/acl/acl-rules#mesh-rules) for
-additional information about configuring policies that enable you to interact
-with Consul API gateway configurations.
-
-## Define the gateway and listeners
-
-Create an API gateway configuration entry that defines listeners and TLS certificates
-in the mesh.
-
-1. Specify the following fields:
- - `Kind`: Specifies the type of configuration entry to implement. This must be `api-gateway`.
- - `Name`: Specify a name for the gateway configuration. The name is metadata that you can use to reference the configuration entry when performing Consul operations.
- - `Listeners`: Specify a list of listener configurations. Each listener is a map containing the following fields:
- - `Port`: Specifies the port that the listener receives traffic on.
- - `Name`: Specifies a unique name for the listener.
- - `Protocol`: You can set either `tcp` or `http`
- - `TLS`: Defines TLS encryption configurations for the listener.
-
- Refer to the [API gateway configuration entry reference](/consul/docs/connect/config-entries/api-gateway) for details on how to define fields in the `Listeners` block.
-1. Configure any additional fields necessary for your use case, such as the namespace or admin partition. Refer to the [API gateway configuration entry reference](/consul/docs/connect/config-entries/api-gateway) for additional information.
-1. Save the configuration.
-
-In the following example, the API gateway specifies an HTTP listener on port `8443`. It also requires an inline-certificate configuration entry named `my-certificate` that contains a valid certificate and private key pair:
-
-```hcl
-Kind = "api-gateway"
-Name = "my-gateway"
-
-// Each listener configures a port which can be used to access the Consul cluster
-Listeners = [
- {
- Port = 8443
- Name = "my-http-listener"
- Protocol = "http"
- TLS = {
- Certificates = [
- {
- Kind = "inline-certificate"
- Name = "my-certificate"
- }
- ]
- }
- }
-]
-```
-
-Refer to [API Gateway Configuration Reference](/consul/docs/connect/gateways/api-gateway/configuration/api-gateway) for
-information about all configuration fields.
-
-Gateways and routes are eventually-consistent objects that provide feedback
-about their current state through a series of status conditions. As a result,
-you must manually check the route status to determine if the route
-bound to the gateway successfully.
-
-## Deploy the API gateway and listeners
-
-Use the `consul config write` command to implement the API gateway configuration entry. The following command applies the configuration entry for the main gateway object:
-
-```shell-session
-$ consul config write gateways.hcl
-```
-
-Run the following command to deploy an API gateway instance:
-
-```shell-session
-$ consul connect envoy -gateway api -register -service my-api-gateway
-```
-
-The command directs Consul to configure Envoy as an API gateway. Gateways and routes are eventually-consistent objects that provide feedback about their current state through a series of status conditions. As a result, you must manually check the route status to determine if the route successfully bound to the gateway.
diff --git a/website/content/docs/connect/gateways/api-gateway/errors.mdx b/website/content/docs/connect/gateways/api-gateway/errors.mdx
deleted file mode 100644
index 96ed7364fe6e..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/errors.mdx
+++ /dev/null
@@ -1,75 +0,0 @@
----
-layout: docs
-page_title: Consul API Gateway Error Messages
-description: >-
- Learn how to apply a configured Consul API Gateway to your Kubernetes cluster, review the required fields for rerouting HTTP requests, and troubleshoot an error message.
----
-
-# Error Messages
-
-This topic provides information about potential error messages associated with Consul API Gateway. If you receive an error message that does not appear in this section, refer to the following resources:
-
-- [Common Consul errors](/consul/docs/troubleshoot/common-errors#common-errors-on-kubernetes)
-- [Consul troubleshooting guide](/consul/docs/troubleshoot/common-errors)
-- [Consul Discuss forum](https://discuss.hashicorp.com/)
-
-
-
-## Helm installation failed: "no matches for kind"
-
-```log
-Error: INSTALLATION FAILED: unable to build kubernetes objects from release manifest: [unable to recognize "": no matches for kind "GatewayClass" in version "gateway.networking.k8s.io/v1alpha2", unable to recognize "": no matches for kind "GatewayClassConfig" in version "api-gateway.consul.hashicorp.com/v1alpha1"]
-```
-**Conditions:**
-Consul API Gateway generates this error when the required CRD files have not been installed in Kubernetes prior to installing Consul API Gateway.
-
-**Impact:**
-The installation process typically fails after this error message is generated.
-
-**Resolution:**
-Install the required CRDs. Refer to the [Consul API Gateway installation instructions](/consul/docs/connect/gateways/api-gateway/deploy/install-k8s) for instructions.
-
-## Operation cannot be fulfilled, the object has been modified
-
-```
-{"error": "Operation cannot be fulfilled on gatewayclassconfigs.consul.hashicorp.com \"consul-api-gateway\": the object has been modified; please apply your changes to the latest version and try again"}
-
-```
-**Conditions:**
-This error occurs when the gateway controller attempts to update an object that has been modified previously. It is a normal part of running the controller and will resolve itself by automatically retrying.
-
-**Impact:**
-Excessive error logs are produced, but there is no impact to the functionality of the controller.
-
-**Resolution:**
-No action needs to be taken to resolve this issue.
diff --git a/website/content/docs/connect/gateways/api-gateway/index.mdx b/website/content/docs/connect/gateways/api-gateway/index.mdx
deleted file mode 100644
index c6202d898b08..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/index.mdx
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: docs
-page_title: API gateways overview
-description: API gateways provide an ingress point for service mesh traffic. Learn how API gateways add listeners for external traffic and route HTTP requests to services in the mesh.
----
-
-# API gateways overview
-
-This topic provides overview information about API gateways in Consul. API gateways enable external network clients to access applications and services running in a Consul datacenter. Consul API gateways can also forward requests from clients to specific destinations based on path or request protocol.
-
-## API gateway use cases
-
-API gateways solve the following primary use cases:
-
-- **Control access at the point of entry**: Set the protocols of external connection requests and secure inbound connections with TLS certificates from trusted providers, such as Verisign and Let's Encrypt.
-- **Simplify traffic management**: Load balance requests across services and route traffic to the appropriate service by matching one or more criteria, such as hostname, path, header presence or value, and HTTP method.
-
-## Workflows
-
-You can deploy API gateways to networks that implement a variety of computing environments:
-
-- Services hosted on VMs
-- Kubernetes-orchestrated service containers
-- Kubernetes-orchestrated service containers in OpenShift
-
-The following steps describe the general workflow for deploying a Consul API gateway:
-
-1. For Kubernetes-orchestrated services, install Consul on your cluster. For Kubernetes-orchestrated services on OpenShift, you must also enable the `openShift.enabled` parameter. Refer to [Install Consul on Kubernetes](/consul/docs/connect/gateways/api-gateway/install-k8s) for additional information.
-1. Define and deploy the API gateway configurations to create the API gateway artifacts. For VM-hosted services, create configuration entries for the gateway service, listeners configurations, and TLS certificates. For Kubernetes-orchestrated services, configurations also include `GatewayClassConfig` and `parametersRef`. All Consul API Gateways created in Kubernetes with the `consul-k8s` Helm chart v1.5.0 or later use file system certificates when TLS is enabled.
-
-1. Define and deploy routes between the gateway listeners and services in the mesh.
-
-Gateway configurations are modular, so you can define and attach routes and inline certificates to multiple gateways.
-
-## Technical specifications
-
-Refer to [Technical specifications for API gateways on Kubernetes](/consul/docs/connect/gateways/api-gateway/tech-specs) for additional details and considerations about using API gateways in Kubernetes-orchestrated networks.
-
-## Guidance
-
-Refer to the following resources for help setting up and using API gateways:
-
-### Tutorials
-
-- [Control access into the service mesh with Consul API gateway](/consul/tutorials/developer-mesh/kubernetes-api-gateway)
-
-### Usage documentation
-
-- [Deploy API gateway listeners to VMs](/consul/docs/connect/gateways/api-gateway/deploy/listeners-vms)
-- [Deploy API gateway listeners to Kubernetes](/consul/docs/connect/gateways/api-gateway/deploy/listeners-k8s)
-- [Deploy API gateway routes to VMs](/consul/docs/connect/gateways/api-gateway/define-routes/routes-vms)
-- [Deploy API gateway routes to Kubernetes](/consul/docs/connect/gateways/api-gateway/define-routes/routes-k8s)
-- [Reroute HTTP requests in Kubernetes](/consul/docs/connect/gateways/api-gateway/define-routes/reroute-http-requests)
-- [Route traffic to peered services in Kubernetes](/consul/docs/connect/gateways/api-gateway/define-routes/route-to-peered-services)
-- [Encrypt API gateway traffic on VMs](/consul/docs/connect/gateways/api-gateway/secure-traffic/encrypt-vms)
-- [Use JWTs to verify requests to API gateways on VMs](/consul/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-vms)
-- [Use JWTs to verify requests to API gateways on Kubernetes](/consul/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-k8s)
-
-### Reference
-
-- [API gateway configuration reference overview](/consul/docs/connect/gateways/api-gateway/configuration/)
-- [Error messages](/consul/docs/connect/gateways/api-gateway/errors)
diff --git a/website/content/docs/connect/gateways/api-gateway/install-k8s.mdx b/website/content/docs/connect/gateways/api-gateway/install-k8s.mdx
deleted file mode 100644
index 9362100cde04..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/install-k8s.mdx
+++ /dev/null
@@ -1,133 +0,0 @@
----
-layout: docs
-page_title: Install API Gateway for Kubernetes
-description: >-
- Learn how to install custom resource definitions (CRDs) and configure the Helm chart so that you can run Consul API Gateway on your Kubernetes deployment.
----
-
-# Install API gateway for Kubernetes
-
-The Consul API gateway ships with Consul and is automatically installed when you install Consul on Kubernetes. Before you begin the installation process, verify that the environment you are deploying Consul and the API gateway in meets the requirements listed in the [Technical Specifications](/consul/docs/connect/gateways/api-gateway/tech-specs). Refer to the [Release Notes](/consul/docs/release-notes) for any additional information about the version you are deploying.
-
-1. The Consul Helm chart deploys the API gateway using the configuration specified in the `values.yaml` file. Refer to [Helm Chart Configuration - `connectInject.apiGateway`](/consul/docs/k8s/helm#apigateway) for information about the Helm chart configuration options. Create a `values.yaml` file for configuring your Consul API gateway deployment and include the following settings:
-
-
-
-
-
-
- ```yaml
- global:
- name: consul
- connectInject:
- enabled: true
- apiGateway:
- manageExternalCRDs: true
- ```
-
-
-
-
-
-
- If you are installing Consul on an OpenShift Kubernetes cluster, you must include the `global.openShift.enabled` parameter and set it to `true`. Refer to [OpenShift requirements](/consul/docs/connect/gateways/api-gateway/tech-specs#openshift-requirements) for additional information.
-
-
-
- ```yaml
- global:
- openshift:
- enabled: true
- connectInject:
- enabled: true
- apiGateway:
- manageExternalCRDs: true
- cni:
- enabled: true
- logLevel: info
- multus: true
- cniBinDir: "/var/lib/cni/bin"
- cniNetDir: "/etc/kubernetes/cni/net.d"
- ```
-
-
-
-
-
- By default, GKE Autopilot installs [Gateway API resources](https://gateway-api.sigs.k8s.io), so we recommend customizing the `connectInject.apiGateway` stanza to accommodate the pre-installed Gateway API CRDs.
-
- The following working example enables both Consul Service Mesh and Consul API Gateway on GKE Autopilot. Refer to [`connectInject.apiGateway` in the Helm chart reference](https://developer.hashicorp.com/consul/docs/k8s/helm#v-connectinject-apigateway) for additional information.
-
-
-
- ```yaml
- global:
- name: consul
- connectInject:
- enabled: true
- apiGateway:
- manageExternalCRDs: false
- manageNonStandardCRDs: true
- cni:
- enabled: true
- logLevel: debug
- cniBinDir: "/home/kubernetes/bin"
- cniNetDir: "/etc/cni/net.d"
- server:
- resources:
- requests:
- memory: "500Mi"
- cpu: "500m"
- limits:
- memory: "500Mi"
- cpu: "500m"
- ```
-
-
-
-
-
-1. Install Consul API Gateway using the standard Consul Helm chart or Consul K8s CLI, and specify the custom values file. Refer to the [Consul Helm chart](https://github.com/hashicorp/consul-k8s/releases) in GitHub releases for the available versions.
-
-
-
-
- Refer to the official [Consul K8S CLI documentation](/consul/docs/k8s/k8s-cli) to find additional settings.
-
- ```shell-session
- $ brew tap hashicorp/tap
- ```
-
- ```shell-session
- $ brew install hashicorp/tap/consul-k8s
- ```
-
- ```shell-session
- $ consul-k8s install -config-file=values.yaml -set global.image=hashicorp/consul:1.17.0
- ```
-
-
-
-
- Add the HashiCorp Helm repository.
-
- ```shell-session
- $ helm repo add hashicorp https://helm.releases.hashicorp.com
- ```
-
- Install Consul with API Gateway on your Kubernetes cluster by specifying the `values.yaml` file.
-
- ```shell-session
- $ helm install consul hashicorp/consul --version 1.3.0 --values values.yaml --create-namespace --namespace consul
- ```
-
-
-
-
-
-
-[tech-specs]: /consul/docs/api-gateway/tech-specs
-[rel-notes]: /consul/docs/release-notes
diff --git a/website/content/docs/connect/gateways/api-gateway/secure-traffic/encrypt-vms.mdx b/website/content/docs/connect/gateways/api-gateway/secure-traffic/encrypt-vms.mdx
deleted file mode 100644
index fd1cdfe86cb0..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/secure-traffic/encrypt-vms.mdx
+++ /dev/null
@@ -1,89 +0,0 @@
----
-layout: docs
-page_title: Encrypt API gateway traffic on virtual machines
-description: Learn how to define inline certificate config entries and deploy them to Consul. Inline certificate and file system certificate configuration entries enable you to attach TLS certificates and keys to gateway listeners so that traffic between external clients and gateway listeners is encrypted.
----
-
-# Encrypt API gateway traffic on virtual machines
-
-This topic describes how to make TLS certificates available to API gateways so that requests between the user and the gateway endpoint are encrypted.
-
-## Requirements
-
-- Consul v1.15 or later is required to use the Consul API gateway on VMs
- - Consul v1.19 or later is required to use the [file system certificate configuration entry](/consul/docs/connect/config-entries/file-system-certificate)
-- You must have a certificate and key from your CA
-- A Consul cluster with service mesh enabled. Refer to [`connect`](/consul/docs/agent/config/config-files#connect)
-- Network connectivity between the machine deploying the API gateway and a
- Consul cluster agent or server
-
-### ACL requirements
-
-If ACLs are enabled, you must present a token with the following permissions to
-configure Consul and deploy API gateways:
-
-- `mesh: read`
-- `mesh: write`
-
-Refer to [Mesh Rules](/consul/docs/security/acl/acl-rules#mesh-rules) for
-additional information about configuring policies that enable you to interact
-with Consul API gateway configurations.
-
-## Define TLS certificates
-
-1. Create a [file system certificate](/consul/docs/connect/config-entries/file-system-certificate) or [inline certificate](/consul/docs/connect/config-entries/inline-certificate) and specify the following fields:
- - `Kind`: Specifies the type of configuration entry. This must be set to `file-system-certificate` or `inline-certificate`.
- - `Name`: Specify the name in the [API gateway listener configuration](/consul/docs/connect/gateways/api-gateway/configuration/api-gateway#listeners) to bind the certificate to that listener.
- - `Certificate`: Specifies the filepath to the certificate on the local system or the inline public certificate as plain text.
- - `PrivateKey`: Specifies the filepath to the private key on the local system or the inline private key as plain text.
-1. Configure any additional fields necessary for your use case, such as the namespace or admin partition. Refer to the [file system certificate configuration reference](/consul/docs/connect/config-entries/file-system-certificate) or [inline certificate configuration reference](/consul/docs/connect/config-entries/inline-certificate) for more information.
-1. Save the configuration.
-
-### Examples
-
-
-
-
-
-The following example defines a certificate named `my-certificate`. API gateway configurations that specify `inline-certificate` in the `Certificate.Kind` field and `my-certificate` in the `Certificate.Name` field are able to use the certificate.
-
-```hcl
-Kind = "inline-certificate"
-Name = "my-certificate"
-
-Certificate = <
-
-
-
-The following example defines a certificate named `my-certificate`. API gateway configurations that specify `file-system-certificate` in the `Certificate.Kind` field and `my-certificate` in the `Certificate.Name` field are able to use the certificate.
-
-```hcl
-Kind = "file-system-certificate"
-Name = "my-certificate"
-Certificate = "/opt/consul/tls/api-gateway.crt"
-PrivateKey = "/opt/consul/tls/api-gateway.key"
-```
-
-
-
-
-## Deploy the configuration to Consul
-
-Run the `consul config write` command to enable listeners to use the certificate. The following example writes a configuration called `my-certificate.hcl`:
-
-```shell-session
-$ consul config write my-certificate.hcl
-```
diff --git a/website/content/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-k8s.mdx b/website/content/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-k8s.mdx
deleted file mode 100644
index 6bd8f28ccd84..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-k8s.mdx
+++ /dev/null
@@ -1,226 +0,0 @@
----
-layout: docs
-page_title: Use JWTs to verify requests to API gateways on Kubernetes
-description: Learn how to use JSON web tokens (JWT) to verify requests from external clients to listeners on an API gateway on Kubernetes-orchestrated networks.
----
-
-# Use JWTs to verify requests to API gateways on Kubernetes
-
-This topic describes how to use JSON web tokens (JWT) to verify requests to API gateways deployed to Kubernetes-orchestrated containers. If your API gateway is deployed to virtual machines, refer to [Use JWTs to verify requests to API gateways on VMs](/consul/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-vms).
-
- This feature is available in Consul Enterprise.
-
-## Overview
-
-You can configure API gateways to use JWTs to verify incoming requests so that you can stop unverified traffic at the gateway. You can configure JWT verification at different levels:
-
-- Listener defaults: Define basic defaults in a GatewayPolicy resource to apply them to all routes attached to a listener.
-- HTTP route-specific settings: You can define JWT authentication settings for specific HTTP routes. Route-specific JWT settings override default listener configurations.
-- Listener overrides: Define override settings in a GatewayPolicy resource that take precedence over default and route-specific configurations. Use override settings to set enforceable policies for listeners.
-
-
-Complete the following steps to use JWTs to verify requests:
-
-1. Define a JWTProvider that specifies the JWT provider and claims used to verify requests to the gateway.
-1. Define a GatewayPolicy that specifies default and override settings for API gateway listeners and attach it to the gateway.
-1. Define a RouteAuthFilter that specifies route-specific JWT verification settings.
-1. Reference the RouteAuthFilter from the HTTPRoute.
-1. Apply the configurations.
-
-
-## Requirements
-
-- Consul v1.17+
-- Consul on Kubernetes CLI or Helm chart v1.3.0+
-- JWT details, such as claims and provider
-
-
-## Define a JWTProvider
-
-Create a `JWTProvider` CRD that defines the JWT provider to verify claims against.
-
-In the following example, the JWTProvider CRD contains a local JWKS. In production environments, use a production-grade JWKs endpoint instead.
-
-
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: JWTProvider
-metadata:
- name: local
-spec:
- issuer: local
- jsonWebKeySet:
- local:
- jwks: ""
-```
-
-
-
-For more information about the fields you can configure in this CRD, refer to [`JWTProvider` configuration reference](/consul/docs/connect/config-entries/jwtprovider).
-
-## Define a GatewayPolicy
-
-Create a `GatewayPolicy` CRD that defines default and override settings for JWT verification.
-
-- `kind`: Must be set to `GatewayPolicy`
-- `metadata.name`: Specifies a name for the policy.
-- `spec.targetRef.name`: Specifies the name of the API gateway to attach the policy to.
-- `spec.targetRef.kind`: Specifies the kind of resource to attach the policy to. Must be set to `Gateway`.
-- `spec.targetRef.group`: Specifies the resource group. Unless you have created a custom group, this should be set to `gateway.networking.k8s.io/v1beta1`.
-- `spec.targetRef.sectionName`: Specifies a part of the gateway that the policy applies to.
-- `spec.targetRef.override.jwt.providers`: Specifies a list of providers and claims used to verify requests to the gateway. The override settings take precedence over the default and route-specific JWT verification settings.
-- `spec.targetRef.default.jwt.providers`: Specifies a list of default providers and claims used to verify requests to the gateway.
-
-The following examples configure a Gateway and the GatewayPolicy being attached to it so that every request coming through the listener must meet these conditions:
-
-- The request must be signed by the `local` provider
-- The request must have a claim of `role` with a value of `user` unless the HTTPRoute attached to the listener overrides it
-
-
-
-
-
-
-```yaml
-apiVersion: gateway.networking.k8s.io/v1beta1
-kind: Gateway
-metadata:
- name: api-gateway
-spec:
- gatewayClassName: consul
- listeners:
- - protocol: HTTP
- port: 30002
- name: listener-one
-```
-
-
-
-
-
-
-
-
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: GatewayPolicy
-metadata:
- name: gw-policy
-spec:
- targetRef:
- name: api-gateway
- sectionName: listener-one
- group: gateway.networking.k8s.io/v1beta1
- kind: Gateway
- override:
- jwt:
- providers:
- - name: "local"
- default:
- jwt:
- providers:
- - name: "local"
- verifyClaims:
- - path:
- - role
- value: user
-```
-
-
-
-
-
-
-For more information about the fields you can configure, refer to [`GatewayPolicy` configuration reference](/consul/docs/connect/gateways/api-gateway/configuration/gatewaypolicy).
-
-## Define a RouteAuthFilter
-
-Create an `RouteAuthFilter` CRD that defines overrides for the default JWT verification configured in the GatewayPolicy.
-
-- `kind`: Must be set to `RouteAuthFilter`
-- `metadata.name`: Specifies a name for the filter.
-- `metadata.namespace`: Specifies the Consul namespace the filter applies to.
-- `spec.jwt.providers`: Specifies a list of providers and claims used to verify requests to the gateway. The override settings take precedence over the default and route-specific JWT verification settings.
-
-In the following example, the RouteAuthFilter overrides default settings set in the GatewayPolicy so that every request coming through the listener must meet these conditions:
-
-- The request must be signed by the `local` provider
-- The request must have a `role` claim
-- The value of the claim must be `admin`
-
-
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: RouteAuthFilter
-metadata:
- name: auth-filter
-spec:
- jwt:
- providers:
- - name: local
- verifyClaims:
- - path:
- - role
- value: admin
-```
-
-
-
-For more information about the fields you can configure, refer to [`RouteAuthFilter` configuration reference](/consul/docs/connect/gateways/api-gateway/configuration/routeauthfilter).
-
-## Attach the auth filter to your HTTP routes
-
-In the `filters` field of your HTTPRoute configuration, define the filter behavior that results from JWT verification.
-
-- `type: extensionRef`: Declares a list of extension references.
-- `extensionRef.group`: Specifies the resource group. For the `RouteAuthFilter` CRD, this should be set to `consul.hashicorp.com`, as shown in the example below.
-- `extensionRef.kind`: Specifies the type of extension reference to attach to the route. Must be `RouteAuthFilter`
-- `extensionRef.name`: Specifies the name of the auth filter.
-
-The following example configures an HTTPRoute so that every request to `api-gateway-fqdn:3002/admin` must meet these conditions:
-
-- The request must be signed by the `local` provider.
-- The request must have a `role` claim.
-- The value of the claim must be `admin`.
-
-Every other request must be signed by the `local` provider and have a claim of `role` with a value of `user`, as defined in the GatewayPolicy.
-
-
-
-```yaml
-apiVersion: gateway.networking.k8s.io/v1beta1
-kind: HTTPRoute
-metadata:
- name: http-route
-spec:
- parentRefs:
- - name: api-gateway
- rules:
- - matches:
- - path:
- type: PathPrefix
- value: /admin
- filters:
- - type: ExtensionRef
- extensionRef:
- group: consul.hashicorp.com
- kind: RouteAuthFilter
- name: auth-filter
- backendRefs:
- - kind: Service
- name: admin
- port: 8080
- - matches:
- - path:
- type: PathPrefix
- value: /
- backendRefs:
- - kind: Service
- name: user-service
- port: 8081
-```
-
-
diff --git a/website/content/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-vms.mdx b/website/content/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-vms.mdx
deleted file mode 100644
index fda579669fac..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-vms.mdx
+++ /dev/null
@@ -1,184 +0,0 @@
----
-layout: docs
-page_title: Use JWTs to verify requests to API gateways on virtual machines
-description: Learn how to use JSON web tokens (JWT) to verify requests from external clients to listeners on an API gateway.
----
-
-# Use JWTs to verify requests to API gateways on virtual machines
-
-This topic describes how to use JSON web tokens (JWT) to verify requests to API gateways on virtual machines (VM). If your services are deployed to Kubernetes-orchestrated containers, refer to [Use JWTs to verify requests to API gateways on Kubernetes](/consul/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-k8s).
-
- This feature is available in Consul Enterprise.
-
-## Overview
-
-You can configure API gateways to use JWTs to verify incoming requests so that you can stop unverified traffic at the gateway. You can configure JWT verification at different levels:
-
-- Listener defaults: Define basic defaults that apply to all routes attached to a listener.
-- HTTP route-specific settings: You can define JWT authentication settings for specific HTTP routes. Route-specific JWT settings override default configurations.
-- Listener overrides: Define override settings that take precedence over default and route-specific configurations. This enables you to set enforceable policies for listeners.
-
-Complete the following steps to use JWTs to verify requests:
-
-1. Define a JWTProvider that specifies the JWT provider and claims used to verify requests to the gateway.
-1. Configure default and override settings for listeners in the API gateway configuration entry.
-1. Define route-specific JWT verification settings as filters in the HTTP route configuration entries.
-1. Write the configuration entries to Consul to begin verifying requests using JWTs.
-
-## Requirements
-
-- Consul 1.17 or later
-- JWT details, such as claims and provider
-
-## Define a JWTProvider
-
-Create a JWTProvider config entry that defines the JWT provider to verify claims against.
-In the following example, the JWTProvider CRD contains a local JWKS. In production environments, use a production-grade JWKs endpoint instead.
-
-
-
-```hcl
-Kind = "jwt-provider"
-Name = "local"
-
-Issuer = "local"
-
-JSONWebKeySet = {
- Local = {
- JWKS=""
- }
-}
-```
-
-
-
-For more information about the fields you can configure in this CRD, refer to [`JWTProvider` configuration reference](/consul/docs/connect/config-entries/jwtprovider).
-
-## Configure default and override settings
-
-Define default and override settings for JWT verification in the [API gateway configuration entry](/consul/docs/connect/gateways/api-gateway/configuration/api-gateway).
-
-1. Add a `default.JWT` block to the listener that you want to apply JWT verification to. Consul applies these configurations to routes attached to the listener. Refer to the [`Listeners.default.JWT`](/consul/docs/connect/config-entries/api-gateway#listeners-default-jwt) configuration reference for details.
-1. Add an `override.JWT` block to the listener that you want to apply JWT verification policies to. Consul applies these configurations to all routes attached to the listener, regardless of the `default` or route-specific settings. Refer to the [`Listeners.override.JWT`](/consul/docs/connect/config-entries/api-gateway#listeners-override-jwt) configuration reference for details.
-1. Apply the settings in the API gateway configuration entry. You can use the [`/config` API endpoint](/consul/api-docs/config#apply-configuration) or the [`consul config write` command](/consul/commands/config/write).
-
-The following examples configure a Gateway so that every request coming through the listener must meet these conditions:
-- The request must be signed by the `local` provider
-- The request must have a claim of `role` with a value of `user` unless the HTTPRoute attached to the listener overrides it
-
-
-
-```hcl
-Kind = "api-gateway"
-Name = "api-gateway"
-Listeners = [
- {
- Name = "listener-one"
- Port = 9001
- Protocol = "http"
- Override = {
- JWT = {
- Providers = [
- {
- Name = "local"
- }
- ]
- }
- }
- default = {
- JWT = {
- Providers = [
- {
- Name = "local"
- VerifyClaims = [
- {
- Path = ["role"]
- Value = "pet"
- }
- ]
- }
- ]
- }
- }
- }
-]
-```
-
-
-
-## Configure verification for specific HTTP routes
-
-Define filters to enable route-specific JWT verification settings in the [HTTP route configuration entry](/consul/docs/connect/config-entries/http-route).
-
-1. Add a `JWT` configuration to the `rules.filter` block. Route-specific configurations that overlap the [default settings ](/consul/docs/connect/config-entries/api-gateway#listeners-default-jwt) in the API gateway configuration entry take precedence. Configurations defined in the [listener override settings](/consul/docs/connect/config-entries/api-gateway#listeners-override-jwt) take the highest precedence.
-1. Apply the settings in the API gateway configuration entry. You can use the [`/config` API endpoint](/consul/api-docs/config#apply-configuration) or the [`consul config write` command](/consul/commands/config/write).
-
-The following example configures an HTTPRoute so that every request to `api-gateway-fqdn:3002/admin` must meet these conditions:
-- The request must be signed by the `local` provider.
-- The request must have a `role` claim.
-- The value of the claim must be `admin`.
-
-Every other request must be signed by the `local` provider and have a claim of `role` with a value of `user`, as defined in the Gateway listener.
-
-
-
-```hcl
-Kind = "http-route"
-Name = "api-gateway-route"
-Parents = [
- {
- SectionName = "listener-one"
- Name = "api-gateway"
- Kind = "api-gateway"
- },
-]
-Rules = [
- {
- Matches = [
- {
- Path = {
- Match = "prefix"
- Value = "/admin"
- }
- }
- ]
- Filters = {
- JWT = {
- Providers = [
- {
- Name = "local"
- VerifyClaims = [
- {
- Path = ["role"]
- Value = "admin"
- }
- ]
- }
- ]
- }
- }
- Services = [
- {
- Name = "admin-service"
- }
- ]
- },
- {
- Matches = [
- {
- Path = {
- Match = "prefix"
- Value = "/"
- }
- }
- ]
- Services = [
- {
- Name = "user-service"
- }
- ]
- },
-]
-```
-
-
diff --git a/website/content/docs/connect/gateways/api-gateway/tech-specs.mdx b/website/content/docs/connect/gateways/api-gateway/tech-specs.mdx
deleted file mode 100644
index 9a79f75ca122..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/tech-specs.mdx
+++ /dev/null
@@ -1,154 +0,0 @@
----
-layout: docs
-page_title: API gateway for Kubernetes technical specifications
-description: >-
- Learn about the requirements for installing and using the Consul API gateway for Kubernetes, including required ports, component version minimums, Consul Enterprise limitations, and compatible k8s cloud environments.
----
-
-# API gateway for Kubernetes technical specifications
-
-This topic describes the requirements and technical specifications associated with using Consul API gateway.
-
-## Datacenter requirements
-
-Your datacenter must meet the following requirements prior to configuring the Consul API gateway:
-
-- HashiCorp Consul Helm chart v1.2.0 and later
-
-## TCP port requirements
-
-The following table describes the TCP port requirements for each component of the API gateway.
-
-| Port | Description | Component |
-| ---- | ----------- | --------- |
-| 20000 | Kubernetes readiness probe | Gateway instance pod |
-| Configurable | Port for scraping Prometheus metrics. Disabled by default. | Gateway controller pod |
-
-## OpenShift requirements
-
-You can deploy API gateways to Kubernetes clusters managed by Red Hat OpenShift, which is a security-conscious, opinionated wrapper for Kubernetes. To enable OpenShift support, add the following parameters to your Consul values file and apply the configuration:
-
-```yaml
- openshift:
- enabled: true
- ```
-
-Refer to the following topics for additional information:
-
-- [Install Consul on OpenShift clusters with Helm](/consul/docs/k8s/installation/install#install-consul-on-openshift-clusters)
-- [Install Consul on OpenShift clusters with the `consul-k8s` CLI](/consul/docs/k8s/installation/install-cli#install-consul-on-openshift-clusters)
-
-### Security context constraints
-
-OpenShift requires a security context constraint (SCC) configuration, which restricts pods to specific groups. You can create a custom SCC or use one of the default constraints. Refer to the [OpenShift documentation](https://docs.openshift.com/container-platform/4.13/authentication/managing-security-context-constraints.html) for additional information.
-
-By default, the SCC is set to `restricted-v2` for the `managedGatewayClass` that Consul automatically creates. The `restricted-v2` SCC is one of OpenShift's default SCCs, but you can specify a different SCC in the `openshiftSCCName` parameter:
-
-```yaml
-connectInject:
- apiGateway:
- managedGatewayClass:
- openshiftSCCName: "restricted-v2"
-```
-
-### Privileged container ports
-
-Containers cannot use privileged ports when OpenShift is enabled. Privileged ports are 1 through 1024, and serving applications from that range is a security risk.
-
-To allow gateway listeners to use privileged port numbers, specify an integer value in the `mapPrivilegedContainerPorts` field of your Consul values configuration. Consul adds the value to listener port numbers that are set to a number in the privileged container range. Consul maps the configured port number to the total port number so that traffic sent to the configured port number is correctly forwarded to the service.
-
-For example, if a gateway listener is configured to port `80` and the `mapPrivilegedContainerPorts` field is configured to `2000`, then the actual port number on the underlying container is `2080`.
-
-You can set the `mapPrivilegedContainerPorts` parameter in the following map in your Consul values file:
-
-```yaml
-connectInject:
- apiGateway:
- managedGatewayClass:
- mapPrivilegedContainerPorts:
-```
-
-## Supported versions of the Kubernetes gateway API specification
-
-Refer to the [release notes](/consul/docs/release-notes) for your version of Consul.
-
-## Supported Kubernetes gateway specification features
-
-Consul API gateways for Kubernetes support a subset of the Kubernetes Gateway API specification. For a complete list of features, including the list of gateway and route statuses and an explanation on how they
-are used, refer to the [documentation in our GitHub repo](https://github.com/hashicorp/consul-api-gateway/blob/main/dev/docs/supported-features.md):
-
-### `GatewayClass`
-
-The `GatewayClass` resource describes a class of gateway configurations to use as a template for creating `Gateway` resources. You can also specify custom API gateway configurations in a `GatewayClassConfig` CRD and attach them to the `GatewayClass` using the `parametersRef` field.
-
-You must specify the `"hashicorp.com/consul-api-gateway-controller"` controller so that Consul can manage gateways generated by the `GatewayClass`. Refer to the [Kubernetes `GatewayClass` documentation](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.GatewayClass) for additional information.
-
-### `Gateway`
-
-The `Gateway` resource is the core API gateway component. Gateways have one or more listeners that can route `HTTP`, `HTTPS`, or `TCP` traffic. You can define header-based hostname matching for listeners, but SNI is not supported.
-
-You can apply filters to add, remove, and set header values on incoming requests. Gateways support the `terminate` TLS mode and `core/v1/Secret` TLS certificates. Extended option support includes TLS version and cipher constraints. Refer to [Kubernetes `Gateway` resource configuration reference](/consul/docs/connect/gateways/api-gateway/configuration/gateway) for more information.
-
-### `HTTPRoute`
-
-`HTTPRoute` configurations determine HTTP paths between listeners defined on the gateway and services in the mesh. You can specify weights to load balance traffic, as well as define rules for matching request paths, headers, queries, and methods to ensure that traffic is routed appropriately. You can apply filters to add, remove, and set header values on requests sent through the route.
-
-Routes support the following backend types:
-
-- `core/v1/Service` backend types when the route maps to a service registered with Consul.
-- `api-gateway.consul.hashicorp.com/v1alpha1/MeshService`.
-
-Refer to [Kubernetes `HTTPRoute` documentation](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.HTTPRoute) for additional information.
-
-### `TCPRoute`
-
-`TCPRoute` configurations determine TCP paths between listeners defined on the gateway and services in the mesh. Routes support the following backend types:
-
-- `core/v1/Service` backend types when the route maps to a service registered with Consul.
-- `api-gateway.consul.hashicorp.com/v1alpha1/MeshService`.
-
-Refer to [Kubernetes `TCPRoute` documentation](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.TCPRoute) for additional information.
-
-### `ReferenceGrant`
-
-`ReferenceGrant` resources allow resources to reference resources in other namespaces. They are required to allow references from a `Gateway` to a Kubernetes `core/v1/Secret` in a different namespace. Without a `ReferenceGrant`, `backendRefs` attached to the gateway may not be permitted. As a result, the `ReferenceGrant` sets a `ResolvedRefs` status to `False` with the reason `InvalidCertificateRef`, which prevents the gateway from becoming ready.
-
-`ReferenceGrant` resources are also required for references from an `HTTPRoute` or `TCPRoute` to a Kubernetes `core/v1/Service` in a different namespace. Without a `ReferenceGrant`, `backendRefs` attached to the route may not be permitted. As a result, Kubernetes sets a `ResolvedRefs` status to `False` with the reason `RefNotPermitted`, which causes the gateway listener to reject the route.
-
-If a route `backendRefs` becomes unpermitted, the entire route is removed from the gateway listener. A `backendRefs` can become unpermitted when you delete a `ReferenceGrant` or add a new unpermitted `backendRefs` to an existing route.
-
-Refer to the [Kubernetes `ReferenceGrant` documentation](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferenceGrant) for additional information.
-
-## Consul server deployments
-
-- Consul Enterprise and the community edition are both supported.
-- Supported Consul Server deployment types:
- - Self-Managed
- - HCP Consul Dedicated
-
-### Consul feature support
-
-API gateways on Kubernetes support all Consul features, but you can only route traffic between multiple datacenters through peered connections. Refer to [Route Traffic to Peered Services](/consul/docs/connect/gateways/api-gateway/define-routes/route-to-peered-services) for additional information. WAN federation is not supported.
-
-## Deployment Environments
-
-Consul API gateway can be deployed in the following Kubernetes-based environments:
-
-- Standard Kubernetes environments
-- AWS Elastic Kubernetes Service (EKS)
-- Google Kubernetes Engine (GKE)
-- Azure Kubernetes Service (AKS)
-
-## Resource allocations
-
-The following resources are allocated for each component of the API gateway.
-
-### Gateway controller pod
-
-- **CPU**: None. Either the namespace or cluster default is allocated, depending on the Kubernetes cluster configuration.
-- **Memory**: None. Either the namespace or cluster default is allocated, depending on the Kubernetes cluster configuration.
-
-### Gateway instance pod
-
-- **CPU**: None. Either the namespace or cluster default is allocated, depending on the Kubernetes cluster configuration.
-- **Memory**: None. Either the namespace or cluster default is allocated, depending on the Kubernetes cluster configuration.
diff --git a/website/content/docs/connect/gateways/api-gateway/upgrades-k8s.mdx b/website/content/docs/connect/gateways/api-gateway/upgrades-k8s.mdx
deleted file mode 100644
index 5b514340268f..000000000000
--- a/website/content/docs/connect/gateways/api-gateway/upgrades-k8s.mdx
+++ /dev/null
@@ -1,745 +0,0 @@
----
-layout: docs
-page_title: Upgrade API Gateway for Kubernetes
-description: >-
- Upgrade Consul API Gateway to use newly supported features. Learn about the requirements, procedures, and post-configuration changes involved in standard and specific version upgrades.
----
-
-# Upgrade API gateway for Kubernetes
-
-Since Consul v1.15, the Consul API gateway is a native feature within the Consul binary and is installed during the normal Consul installation process. Since Consul on Kubernetes v1.2 (Consul v1.16), the CRDs necessary for using the Consul API gateway for Kubernetes are also included. You can install Consul v1.16 using the Consul Helm chart v1.2 and later. Refer to [Install API gateway for Kubernetes](/consul/docs/connect/gateways/api-gateway/deploy/install-k8s) for additional information.
-
-## Introduction
-
-Because Consul API gateway releases as part of Consul, it no longer has an independent version number. Instead, the API gateway inherits the same version number as the Consul binary. Refer to the [release notes](/consul/docs/release-notes) for additional information.
-
-To begin using the native API gateway, complete one of the following upgrade paths:
-
-### Upgrade from Consul on Kubernetes v1.1.x
-
-1. Complete the instructions for [upgrading to the native Consul API gateway](#upgrade-to-native-consul-api-gateway).
-
-### Upgrade from v0.4.x - v0.5.x
-
-1. Complete the [standard upgrade instructions](#standard-upgrade)
-1. Complete the instructions for [upgrading to the native Consul API gateway](#upgrade-to-native-consul-api-gateway).
-
-### Upgrade from v0.3.x
-
-1. Complete the instructions for [upgrading to v0.4.0](#upgrade-to-v0-4-0)
-1. Complete the [standard upgrade instructions](#standard-upgrade)
-1. Complete the instructions for [upgrading to the native Consul API gateway](#upgrade-to-native-consul-api-gateway).
-
-### Upgrade from v0.2.x
-
-1. Complete the instructions for [upgrading to v0.3.0](#upgrade-to-v0-3-0)
-1. Complete the instructions for [upgrading to v0.4.0](#upgrade-to-v0-4-0)
-1. Complete the [standard upgrade instructions](#standard-upgrade)
-1. Complete the instructions for [upgrading to the native Consul API gateway](#upgrade-to-native-consul-api-gateway).
-
-### Upgrade from v0.1.x
-
-1. Complete the instructions for [upgrading to v0.2.0](#upgrade-to-v0-2-0)
-1. Complete the instructions for [upgrading to v0.3.0](#upgrade-to-v0-3-0)
-1. Complete the instructions for [upgrading to v0.4.0](#upgrade-to-v0-4-0)
-1. Complete the [standard upgrade instructions](#standard-upgrade)
-1. Complete the instructions for [upgrading to the native Consul API gateway](#upgrade-to-native-consul-api-gateway).
-
-## Upgrade to native Consul API gateway
-
-You must begin the upgrade procedure with API gateway with Consul on Kubernetes v1.1 installed. If you are currently using a version of Consul on Kubernetes older than v1.1, complete the necessary stages of the upgrade path to v1.1 before you begin upgrading to the native API gateway. Refer to the [Introduction](#introduction) for an overview of the upgrade paths.
-
-### Consul-managed CRDs
-
-If you are able to tolerate downtime for your applications, you should delete previously installed CRDs and allow Consul to install and manage them for future updates. The amount of downtime depends on how quickly you are able to install the new version of Consul. If you are unable to tolerate any downtime, refer to [Self-managed CRDs](#self-managed-crds) for instructions on how to upgrade without downtime.
-
-1. Run the `kubectl delete` command and reference the `kustomize` directory to delete the existing CRDs. The following example deletes the CRDs that were installed with API gateway `v0.5.1`:
-
- ```shell-session
- $ kubectl delete --kustomize="github.com/hashicorp/consul-api-gateway/config/crd?ref=v0.5.1"
- ```
-
-1. Issue the following command to use the API gateway packaged in Consul. Since Consul will not detect an external CRD, it will try to install the API gateway packaged with Consul.
-
- ```shell-session
- $ consul-k8s install -config-file values.yaml
- ```
-
-1. Create `ServiceIntentions` allowing `Gateways` to communicate with any backend services that they route to. Refer to [Service intentions configuration entry reference](/consul/docs/connect/config-entries/service-intentions) for additional information.
-
-1. Change any existing `Gateways` to reference the new `GatewayClass` `consul`. Refer to [gatewayClass](/consul/docs/connect/gateways/api-gateway/configuration/gateway#gatewayclassname) for additional information.
-
-1. After updating all of your `gateway` configurations to use the new controller, you can remove the `apiGateway` block from the Helm chart and upgrade your Consul cluster. This completely removes the old gateway controller.
-
-
-
- ```diff
- global:
- image: hashicorp/consul:1.15
- imageK8S: hashicorp/consul-k8s-control-plane:1.1
- - apiGateway:
- - enabled: true
- - image: hashicorp/consul-api-gateway:0.5.4
- - managedGatewayClass:
- - enabled: true
- ```
-
-
-
- ```shell-session
- $ consul-k8s install -config-file values.yaml
- ```
-
-### Self-managed CRDs
-
-
-
- This upgrade method uses `connectInject.apiGateway.manageExternalCRDs`, which was introduced in Consul on Kubernetes v1.2. As a result, you must be on at least Consul on Kubernetes v1.2 for this upgrade method.
-
-
-
-If you are unable to tolerate any downtime, you can complete the following steps to upgrade to the native Consul API gateway. If you choose this upgrade option, you must continue to manually install the CRDs necessary for operating the API gateway.
-
-1. Create a Helm chart that installs the version of Consul API gateway that ships with Consul and disables externally-managed CRDs:
-
-
-
- ```yaml
- global:
- image: hashicorp/consul:1.16
- imageK8S: hashicorp/consul-k8s-control-plane:1.2
- connectInject:
- apiGateway:
- manageExternalCRDs: false
- apiGateway:
- enabled: true
- image: hashicorp/consul-api-gateway:0.5.4
- managedGatewayClass:
- enabled: true
- ```
-
-
-
- You must set `connectInject.apiGateway.manageExternalCRDs` to `false`. If you have external CRDs with legacy installation and you do not set this, you will get an error when you try to upgrade because Helm will try to install CRDs that already exist.
-
-1. Issue the following command to install the new version of API gateway and disable externally-managed CRDs:
-
- ```shell-session
- $ consul-k8s install -config-file values.yaml
- ```
-
-1. Create `ServiceIntentions` allowing `Gateways` to communicate with any backend services that they route to. Refer to [Service intentions configuration entry reference](/consul/docs/connect/config-entries/service-intentions) for additional information.
-
-1. Change any existing `Gateways` to reference the new `GatewayClass` `consul`. Refer to [gatewayClass](/consul/docs/connect/gateways/api-gateway/configuration/gateway#gatewayclassname) for additional information.
-
-1. After updating all of your `gateway` configurations to use the new controller, you can remove the `apiGateway` block from the Helm chart and upgrade your Consul cluster. This completely removes the old gateway controller.
-
-
-
- ```diff
- global:
- image: hashicorp/consul:1.16
- imageK8S: hashicorp/consul-k8s-control-plane:1.2
- connectInject:
- apiGateway:
- manageExternalCRDs: false
- - apiGateway:
- - enabled: true
- - image: hashicorp/consul-api-gateway:0.5.4
- - managedGatewayClass:
- - enabled: true
- ```
-
-
-
- ```shell-session
- $ consul-k8s install -config-file values.yaml
- ```
-
-## Upgrade to v0.4.0
-
-Consul API Gateway v0.4.0 adds support for [Gateway API v0.5.0](https://github.com/kubernetes-sigs/gateway-api/releases/tag/v0.5.0) and the following resources:
-
-- The graduated v1beta1 `GatewayClass`, `Gateway` and `HTTPRoute` resources.
-
-- The [`ReferenceGrant`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferenceGrant) resource, which replaces the identical [`ReferencePolicy`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy) resource.
-
-Consul API Gateway v0.4.0 is backward-compatible with existing `ReferencePolicy` resources, but we will remove support for `ReferencePolicy` resources in a future release. We recommend that you migrate to `ReferenceGrant` after upgrading.
-
-### Requirements
-
-Ensure that the following requirements are met prior to upgrading:
-
-- Consul API Gateway should be running version v0.3.0.
-
-### Procedure
-
-1. Complete the [standard upgrade](#standard-upgrade).
-
-1. After completing the upgrade, complete the [post-upgrade configuration changes](#v0.4.0-post-upgrade-configuration-changes). The post-upgrade procedure describes how to replace your `ReferencePolicy` resources with `ReferenceGrant` resources and how to upgrade your `GatewayClass`, `Gateway`, and `HTTPRoute` resources from v1alpha2 to v1beta1.
-
-
-
-### Post-upgrade configuration changes
-
-Complete the following steps after performing standard upgrade procedure.
-
-#### Requirements
-
-- Consul API Gateway should be running version v0.4.0.
-- Consul Helm chart should be v0.47.0 or later.
-- You should have the ability to run `kubectl` CLI commands.
-- `kubectl` should be configured to point to the cluster containing the installation you are upgrading.
-- You should have the following permissions for your Kubernetes cluster:
- - `Gateway.read`
- - `ReferenceGrant.create` (Added in Consul Helm chart v0.47.0)
- - `ReferencePolicy.delete`
-
-#### Procedure
-
-1. Verify the current version of the `consul-api-gateway-controller` `Deployment`:
-
- ```shell-session
- $ kubectl get deployment --namespace consul consul-api-gateway-controller --output=jsonpath="{@.spec.template.spec.containers[?(@.name=='api-gateway-controller')].image}"
- ```
-
- You should receive a response similar to the following:
-
- ```log hideClipboard
- "hashicorp/consul-api-gateway:0.4.0"
- ```
-
-
-
-1. Issue the following command to get all `ReferencePolicy` resources across all namespaces.
-
- ```shell-session
- $ kubectl get referencepolicy --all-namespaces
- ```
-If you have any active `ReferencePolicy` resources, you will receive output similar to the response below.
-
- ```log hideClipboard
- Warning: ReferencePolicy has been renamed to ReferenceGrant. ReferencePolicy will be removed in v0.6.0 in favor of the identical ReferenceGrant resource.
- NAMESPACE NAME
- default example-reference-policy
- ```
-
- If your output is empty, upgrade your `GatewayClass`, `Gateway` and `HTTPRoute` resources to v1beta1 as described in [step 7](#v1beta1-gatewayclass-gateway-httproute).
-
-1. For each `ReferencePolicy` in the source YAML files, change the `kind` field to `ReferenceGrant`. You can optionally update the `metadata.name` field or filename if they include the term "policy". In the following example, the `kind` and `metadata.name` fields and filename have been changed to reflect the new resource. Note that updating the `kind` field prevents you from using the `kubectl edit` command to edit the remote state directly.
-
-
-
- ```yaml
- apiVersion: gateway.networking.k8s.io/v1alpha2
- kind: ReferenceGrant
- metadata:
- name: reference-grant
- namespace: web-namespace
- spec:
- from:
- - group: gateway.networking.k8s.io
- kind: HTTPRoute
- namespace: example-namespace
- to:
- - group: ""
- kind: Service
- name: web-backend
- ```
-
-
-
-1. For each file, apply the updated YAML to your cluster to create a new `ReferenceGrant` resource.
-
- ```shell-session
- $ kubectl apply --filename
- ```
-
-1. Check to confirm that each new `ReferenceGrant` was created successfully.
-
- ```shell-session
- $ kubectl get referencegrant --namespace
- NAME
- example-reference-grant
- ```
-
-1. Finally, delete each corresponding old `ReferencePolicy` resource. Because replacement `ReferenceGrant` resources have already been created, there should be no interruption in the availability of any referenced `Service` or `Secret`.
-
- ```shell-session
- $ kubectl delete referencepolicy --namespace
- Warning: ReferencePolicy has been renamed to ReferenceGrant. ReferencePolicy will be removed in v0.6.0 in favor of the identical ReferenceGrant resource.
- referencepolicy.gateway.networking.k8s.io "example-reference-policy" deleted
- ```
-
-
-
-1. For each `GatewayClass`, `Gateway`, and `HTTPRoute` in the source YAML, update the `apiVersion` field to `gateway.networking.k8s.io/v1beta1`. Note that updating the `apiVersion` field prevents you from using the `kubectl edit` command to edit the remote state directly.
-
-
-
- ```yaml
- apiVersion: gateway.networking.k8s.io/v1beta1
- kind: Gateway
- metadata:
- name: example-gateway
- namespace: gateway-namespace
- spec:
- ...
- ```
-
-
-
-1. For each file, apply the updated YAML to your cluster to update the existing `GatewayClass`, `Gateway` or `HTTPRoute` resources.
-
- ```shell-session
- $ kubectl apply --filename
- gateway.gateway.networking.k8s.io/example-gateway configured
- ```
-
-
-
-## Upgrade to v0.3.0 from v0.2.0 or lower
-
-Consul API Gateway v0.3.0 introduces a change for people upgrading from lower versions. Gateways with `listeners` with a `certificateRef` defined in a different namespace now require a [`ReferencePolicy`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy) that explicitly allows `Gateways` from the gateway's namespace to use `certificateRef` in the `certificateRef`'s namespace.
-
-### Requirements
-
-Ensure that the following requirements are met prior to upgrading:
-
-- Consul API Gateway should be running version v0.2.1 or lower.
-- You should have the ability to run `kubectl` CLI commands.
-- `kubectl` should be configured to point to the cluster containing the installation you are upgrading.
-- You should have the following permission rights on your Kubernetes cluster:
- - `Gateway.read`
- - `ReferencePolicy.create`
-- (Optional) The [jq](https://stedolan.github.io/jq/download/) command line processor for JSON can be installed, which will ease gateway retrieval during the upgrade process.
-
-### Procedure
-
-
-1. Verify the current version of the `consul-api-gateway-controller` `Deployment`:
-
- ```shell-session
- $ kubectl get deployment --namespace consul consul-api-gateway-controller --output=jsonpath="{@.spec.template.spec.containers[?(@.name=='api-gateway-controller')].image}"
- ```
-
- You should receive a response similar to the following:
-
- ```log hideClipboard
- "hashicorp/consul-api-gateway:0.2.1"
- ```
-
-1. Retrieve all gateways that have a `certificateRefs` in a different namespace. If you have installed the [`jq`](https://stedolan.github.io/jq/) utility, you can skip to [step 4](#jq-command-secrets). Otherwise, issue the following command to get all `Gateways` across all namespaces:
-
- ```shell-session
- $ kubectl get Gateway --output json --all-namespaces
- ```
-
- If you have any active `Gateways`, you will receive output similar to the following response. The output has been truncated to show only relevant fields:
-
- ```yaml
- apiVersion: gateway.networking.k8s.io/v1alpha2
- kind: Gateway
- metadata:
- name: example-gateway
- namespace: gateway-namespace
- spec:
- gatewayClassName: "consul-api-gateway"
- listeners:
- - name: https
- port: 443
- protocol: HTTPS
- allowedRoutes:
- namespaces:
- from: All
- tls:
- certificateRefs:
- - group: ""
- kind: Secret
- name: example-certificate
- namespace: certificate-namespace
- ```
-
-1. Inspect the `certificateRefs` entries for each of the routes.
-
- If a `namespace` field is not defined in the `certificateRefs` or if the namespace matches the namespace of the parent `Gateway`, then no additional action is required for the `certificateRefs`. Otherwise, note the `namespace` field values for `certificateRefs` configurations with a `namespace` field that do not match the namespace of the parent `Gateway`. You must also note the `namespace` of the parent gateway. You will need these to create a `ReferencePolicy` that explicitly allows each cross-namespace certificateRefs-to-gateway pair. (see [step 5](#create-secret-reference-policy)).
-
- After completing this step, you will have a list of all secrets similar to the following:
-
-
-
- ```yaml hideClipboard
- example-certificate:
- - namespace: certificate-namespace
- parentNamespace: gateway-namespace
- ```
-
-
-
- Proceed with the [standard-upgrade](#standard-upgrade) if your list is empty.
-
-
-
-1. If you have installed [`jq`](https://stedolan.github.io/jq/), issue the following command to get all `Gateways` and filter for secrets that require a `ReferencePolicy`.
-
- ```shell-session
-
- $ kubectl get Gateway -o json -A | jq -r '.items[] | {gateway_name: .metadata.name, gateway_namespace: .metadata.namespace, kind: .kind, crossNamespaceSecrets: ( .metadata.namespace as $parentnamespace | .spec.listeners[] | select(has("tls")) | .tls.certificateRefs[] | select(.namespace != null and .namespace != $parentnamespace ) )} '
-
- ```
-
- The output will resemble the following response if gateways that require a new `ReferencePolicy` are returned:
-
-
-
- ```log hideClipboard
- {
- "gateway_name": "example-gateway",
- "gateway_namespace": "gateway-namespace",
- "kind": "Gateway",
- "crossNamespaceSecrets": {
- "group": "",
- "kind": "Secret",
- "name": "example-certificate",
- "namespace": "certificate-namespace"
- }
- }
- ```
-
-
-
- If your output is empty, proceed with the [standard-upgrade](#standard-upgrade).
-
-
-1. Using the list of secrets you created earlier as a guide, create a [`ReferencePolicy`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy) to allow each gateway cross namespace secret access.
- The `ReferencePolicy` explicitly allows each cross-namespace gateway to secret pair. The `ReferencePolicy` must be created in the same `namespace` as the `certificateRefs`.
-
- Skip to the next step if you've already created a `ReferencePolicy`.
-
- The following example `ReferencePolicy` enables `example-gateway` in `gateway-namespace` to utilize `certificateRefs` in the `certificate-namespace` namespace:
-
-
-
- ```yaml
- apiVersion: gateway.networking.k8s.io/v1alpha2
- kind: ReferencePolicy
- metadata:
- name: reference-policy
- namespace: certificate-namespace
- spec:
- from:
- - group: gateway.networking.k8s.io
- kind: Gateway
- namespace: gateway-namespace
- to:
- - group: ""
- kind: Secret
- ```
-
-
-
-1. If you have already created a `ReferencePolicy`, modify it to allow your gateway to access your `certificateRef` and save it as `referencepolicy.yaml`. Note that each `ReferencePolicy` only supports one `to` field and one `from` field (refer to the [`ReferencePolicy`](https://gateway-api.sigs.k8s.io/v1alpha2/api-types/referencegrant/#api-design-decisions) documentation). As a result, you may need to create multiple `ReferencePolicy`s.
-
-1. Issue the following command to apply it to your cluster:
-
- ```shell-session
- $ kubectl apply --filename referencepolicy.yaml
- ```
-
- Repeat this step as needed until each of your cross-namespace `certificateRefs` have a corresponding `ReferencePolicy`.
-
- Proceed with the [standard-upgrade](#standard-upgrade).
-
-## Upgrade to v0.2.0
-
-Consul API Gateway v0.2.0 introduces a change for people upgrading from Consul API Gateway v0.1.0. Routes with a `backendRef` defined in a different namespace now require a [`ReferencePolicy`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy) that explicitly allows traffic from the route's namespace to the `backendRef`'s namespace.
-
-### Requirements
-
-Ensure that the following requirements are met prior to upgrading:
-
-- Consul API Gateway should be running version v0.1.0.
-- You should have the ability to run `kubectl` CLI commands.
-- `kubectl` should be configured to point to the cluster containing the installation you are upgrading.
-- You should have the following permission rights on your Kubernetes cluster:
- - `HTTPRoute.read`
- - `TCPRoute.read`
- - `ReferencePolicy.create`
-- (Optional) The [jq](https://stedolan.github.io/jq/download/) command line processor for JSON can be installed, which will ease route retrieval during the upgrade process.
-
-### Procedure
-
-1. Verify the current version of the `consul-api-gateway-controller` `Deployment`:
-
- ```shell-session
- $ kubectl get deployment --namespace consul consul-api-gateway-controller --output=jsonpath="{@.spec.template.spec.containers[?(@.name=='api-gateway-controller')].image}"
- ```
-
- You should receive the following response:
-
- ```log hideClipboard
- "hashicorp/consul-api-gateway:0.1.0"
- ```
-
-1. Retrieve all routes that have a backend in a different namespace. If you have installed the [`jq`](https://stedolan.github.io/jq/) utility, you can skip to [step 4](#jq-command). Otherwise, issue the following command to get all `HTTPRoutes` and `TCPRoutes` across all namespaces:
-
- ```shell-session
- $ kubectl get HTTPRoute,TCPRoute --output json --all-namespaces
- ```
-
- Note that the command only retrieves `HTTPRoutes` and `TCPRoutes`. `TLSRoutes` and `UDPRoutes` are not supported in v0.1.0.
-
- If you have any active `HTTPRoutes` or `TCPRoutes`, you will receive output similar to the following response. The output has been truncated to show only relevant fields:
-
- ```yaml
- apiVersion: v1
- items:
- - apiVersion: gateway.networking.k8s.io/v1alpha2
- kind: HTTPRoute
- metadata:
- name: example-http-route,
- namespace: example-namespace,
- ...
- spec:
- parentRefs:
- - group: gateway.networking.k8s.io
- kind: Gateway
- name: gateway
- namespace: gw-ns
- rules:
- - backendRefs:
- - group: ""
- kind: Service
- name: web-backend
- namespace: gateway-namespace
- ...
- ...
- - apiVersion: gateway.networking.k8s.io/v1alpha2
- kind: TCPRoute
- metadata:
- name: example-tcp-route,
- namespace: a-different-namespace,
- ...
- spec:
- parentRefs:
- - group: gateway.networking.k8s.io
- kind: Gateway
- name: gateway
- namespace: gateway-namespace
- rules:
- - backendRefs:
- - group: ""
- kind: Service
- name: web-backend
- namespace: gateway-namespace
- ...
- ...
- ```
-
-1. Inspect the `backendRefs` entries for each of the routes.
-
- If a `namespace` field is not defined in the `backendRef` or if the namespace matches the namespace of the route, then no additional action is required for the `backendRef`. Otherwise, note the `group`, `kind`, `name`, and `namespace` field values for `backendRef` configurations that have a `namespace` defined that do not match the namespace of the parent route. You must also note the `kind` and `namespace` of the parent route. You will need these to create a `ReferencePolicy` that explicitly allows each cross-namespace route-to-service pair (see [step 5](#create-reference-policy)).
-
- After completing this step, you will have a list of all routes similar to the following:
-
-
-
- ```yaml hideClipboard
- example-http-route:
- - namespace: example-namespace
- kind: HTTPRoute
- backendReferences:
-      - group: ""
- kind: Service
- name: web-backend
- namespace: gateway-namespace
-
- example-tcp-route:
- - namespace: a-different-namespace
-    kind: TCPRoute
-    backendReferences:
-      - group: ""
- kind: Service
- name: web-backend
- namespace: gateway-namespace
- ```
-
-
-
- Proceed with [standard-upgrade](#standard-upgrade) if your list is empty.
-
-
-1. If you have installed [`jq`](https://stedolan.github.io/jq/), issue the following command to get all `HTTPRoutes` and `TCPRoutes` and filter for routes that require a `ReferencePolicy`.
-
- ```shell-session
- $ kubectl get HTTPRoute,TCPRoute -o json -A | jq -r '.items[] | {name: .metadata.name, namespace: .metadata.namespace, kind: .kind, crossNamespaceBackendReferences: ( .metadata.namespace as $parentnamespace | .spec.rules[] .backendRefs[] | select(.namespace != null and .namespace != $parentnamespace ) )} '
- ```
-
- Note that the command retrieves all `HTTPRoutes` and `TCPRoutes`. `TLSRoutes` and `UDPRoutes` are not supported in v0.1.0.
-
- The output will resemble the following response if routes that require a new `ReferencePolicy` are returned:
-
-
-
- ```log hideClipboard
- {
- "name": "example-http-route",
- "namespace": "example-namespace",
- "kind": "HTTPRoute",
- "crossNamespaceBackendReferences": {
- "group": "",
- "kind": "Service",
- "name": "web-backend",
- "namespace": "gateway-namespace",
- "port": 8080,
- "weight": 1
- }
- }
- {
- "name": "example-tcp-route",
- "namespace": "a-different-namespace",
- "kind": "TCPRoute",
- "crossNamespaceBackendReferences": {
- "group": "",
- "kind": "Service",
- "name": "web-backend",
- "namespace": "gateway-namespace",
- "port": 8080,
- "weight": 1
- }
- }
- ```
-
-
-
- If your output is empty, proceed with the [standard-upgrade](#standard-upgrade).
-
-
-1. Using the list of routes you created earlier as a guide, create a [`ReferencePolicy`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy) to allow cross namespace traffic for each route service pair.
- The `ReferencePolicy` explicitly allows each cross-namespace route to service pair. The `ReferencePolicy` must be created in the same `namespace` as the backend `Service`.
-
- Skip to the next step if you've already created a `ReferencePolicy`.
-
-   The following example `ReferencePolicy` enables `HTTPRoute` traffic from the `example-namespace` namespace to the `web-backend` Kubernetes Service in the `gateway-namespace` namespace:
-
-
-
- ```yaml
- apiVersion: gateway.networking.k8s.io/v1alpha2
- kind: ReferencePolicy
- metadata:
- name: reference-policy
- namespace: gateway-namespace
- spec:
- from:
- - group: gateway.networking.k8s.io
- kind: HTTPRoute
- namespace: example-namespace
- to:
- - group: ""
- kind: Service
- name: web-backend
- ```
-
-
-
-1. If you have already created a `ReferencePolicy`, modify it to allow your route and save it as `referencepolicy.yaml`. Note that each `ReferencePolicy` only supports one `to` field and one `from` field (refer to the [`ReferencePolicy`](https://gateway-api.sigs.k8s.io/api-types/referencegrant/#api-design-decisions) documentation). As a result, you may need to create multiple `ReferencePolicy`s.
-
-2. Issue the following command to apply it to your cluster:
-
- ```shell-session
- $ kubectl apply --filename referencepolicy.yaml
- ```
-
- Repeat this step as needed until each of your cross-namespace routes have a corresponding `ReferencePolicy`.
-
- Proceed with the [standard-upgrade](#standard-upgrade).
-
-
-## Standard Upgrade
-
-~> **Note:** When you see `VERSION` in examples of commands or configuration settings, replace `VERSION` with the version number of the release you are installing, like `0.2.0`. If there is a lower case "v" in front of `VERSION`, the version number needs to follow the "v", as in `v0.2.0`.
-
-### Requirements
-
-Ensure that the following requirements are met prior to upgrading:
-
-- You should have the ability to run `kubectl` CLI commands.
-- `kubectl` should be configured to point to the cluster containing the installation you are upgrading.
-
-
-### Procedure
-
-This is the upgrade path to use when there are no version specific steps to take.
-
-
-
-1. Issue the following command to install the new version of CRDs into your cluster:
-
- ``` shell-session
- $ kubectl apply --kustomize="github.com/hashicorp/consul-api-gateway/config/crd?ref=vVERSION"
- ```
-
-1. Update `apiGateway.image` in `values.yaml`:
-
-
-
- ```yaml
- ...
- apiGateway:
- image: hashicorp/consul-api-gateway:VERSION
- ...
- ```
-
-
-
-1. Issue the following command to upgrade your Consul installation:
-
- ```shell-session
-   $ helm upgrade consul hashicorp/consul --namespace consul --version VERSION --values values.yaml
- ```
-
-   Note that the upgrade will cause the Consul API Gateway controller to shut down and restart with the new version.
-
-1. According to the Kubernetes Gateway API specification, [Gateway Class](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io%2fv1alpha2.GatewayClass) configurations should only be applied to a gateway upon creation. To see the effects on preexisting gateways after upgrading your CRD installation, delete and recreate any gateways by issuing the following commands:
-
- ```shell-session
- $ kubectl delete --filename
- $ kubectl create --filename
- ```
-
-
-1. (Optional) Delete and recreate your routes. Note that it may take several minutes for attached routes to reconcile and start reporting bind errors.
-
- ```shell-session
- $ kubectl delete --filename
- $ kubectl create --filename
- ```
-
-### Post-Upgrade Configuration Changes
-
-No additional configuration changes are required for this upgrade.
diff --git a/website/content/docs/connect/gateways/index.mdx b/website/content/docs/connect/gateways/index.mdx
deleted file mode 100644
index 067d3b277672..000000000000
--- a/website/content/docs/connect/gateways/index.mdx
+++ /dev/null
@@ -1,101 +0,0 @@
----
-layout: docs
-page_title: Gateways Overview
-description: >-
- Gateways are proxies that direct traffic into, out of, and inside of Consul's service mesh. They secure communication with external or non-mesh network resources and enable services on different runtimes, cloud providers, or with overlapping IP addresses to communicate with each other.
----
-
-# Gateways Overview
-
-This topic provides an overview of the gateway features shipped with Consul. Gateways provide connectivity into, out of, and between Consul service meshes. You can configure the following types of gateways:
-
-- [Mesh gateways](#mesh-gateways) enable service-to-service traffic between Consul datacenters or between Consul admin partitions. They also enable datacenters to be federated across wide area networks.
-- [Ingress gateways](#ingress-gateways) enable connectivity within your organizational network from services outside the Consul service mesh to services in the mesh.
-- [Terminating gateways](#terminating-gateways) enable connectivity within your organizational network from services in the Consul service mesh to services outside the mesh.
-
-[](/img/consul-connect/svgs/consul_gateway_overview.svg)
-
-## Mesh Gateways
-
-Mesh gateways enable service mesh traffic to be routed between different Consul datacenters and admin partitions. The datacenters or partitions can reside
-in different clouds or runtime environments where general interconnectivity between all services in all datacenters
-isn't feasible.
-
-They operate by sniffing and extracting the server name indication (SNI) header from the service mesh session and routing the connection to the appropriate destination based on the server name requested.
-
-Mesh gateways enable the following scenarios:
-
-* **Federate multiple datacenters across a WAN**. Since Consul 1.8.0, mesh gateways can forward gossip and RPC traffic between Consul servers. See [WAN federation via mesh gateways](/consul/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways) for additional information.
-- **Service-to-service communication across WAN-federated datacenters**. Refer to [Enabling Service-to-service Traffic Across Datacenters](/consul/docs/connect/gateways/mesh-gateway/service-to-service-traffic-wan-datacenters) for additional information.
-- **Service-to-service communication across admin partitions**. Since Consul 1.11.0, you can create administrative boundaries for single Consul deployments called "admin partitions". You can use mesh gateways to facilitate cross-partition communication. Refer to [Enabling Service-to-service Traffic Across Admin Partitions](/consul/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions) for additional information.
-- **Bridge multiple datacenters using Cluster Peering**. Since Consul 1.14.0, mesh gateways can be used to route peering control-plane traffic between peered Consul Servers. See [Mesh Gateways for Peering Control Plane Traffic](/consul/docs/connect/gateways/mesh-gateway/peering-via-mesh-gateways) for more information.
-- **Service-to-service communication across peered datacenters**. Refer to [Establish cluster peering connections](/consul/docs/connect/cluster-peering/usage/establish-cluster-peering) for more information.
-
--> **Mesh gateway tutorial**: Follow the [mesh gateway tutorial](/consul/tutorials/developer-mesh/service-mesh-gateways) to learn concepts associated with mesh gateways.
-
-## API Gateways
-
-API gateways enable network access, from outside a service mesh, to services running in a Consul service mesh. The
-systems accessing the services in the mesh may be within your organizational network or external to it. This type of
-network traffic is commonly called _north-south_ network traffic because it refers to the flow of data into and out of
-a specific environment.
-
-API gateways solve the following primary use cases:
-
-- **Control access at the point of entry**: Set the protocols of external connection
- requests and secure inbound connections with TLS certificates from trusted
- providers, such as Verisign and Let's Encrypt.
-- **Simplify traffic management**: Load balance requests across services and route
- traffic to the appropriate service by matching one or more criteria, such as
- hostname, path, header presence or value, and HTTP method.
-
-Refer to the following documentation for information on how to configure and deploy API gateways:
-- [API Gateways on VMs](/consul/docs/connect/gateways/api-gateway/deploy/listeners-vms)
-- [API Gateways for Kubernetes](/consul/docs/connect/gateways/api-gateway/deploy/listeners-k8s).
-
-
-## Ingress Gateways
-
-
-
-Ingress gateway is deprecated and will not be enhanced beyond its current capabilities. Ingress gateway is fully supported
-in this version but will be removed in a future release of Consul.
-
-Consul's API gateway is the recommended alternative to ingress gateway.
-
-
-
-Ingress gateways enable connectivity within your organizational network from services outside the Consul service mesh
-to services in the mesh. To accept ingress traffic from the public internet, use Consul's
-[API Gateway](https://www.hashicorp.com/blog/announcing-hashicorp-consul-api-gateway) instead.
-
-These gateways allow you to define what services should be exposed, on what port, and by what hostname. You configure
-an ingress gateway by defining a set of listeners that can map to different sets of backing services.
-
-Ingress gateways are tightly integrated with Consul's L7 configuration and enable dynamic routing of HTTP requests by
-attributes like the request path.
-
-For more information about ingress gateways, review the [complete documentation](/consul/docs/connect/gateways/ingress-gateway)
-and the [ingress gateway tutorial](/consul/tutorials/developer-mesh/service-mesh-ingress-gateways).
-
-
-
-## Terminating Gateways
-
-Terminating gateways enable connectivity within your organizational network from services in the Consul service mesh
-to services outside the mesh.
-Services outside the mesh do not have sidecar proxies or are not [integrated natively](/consul/docs/connect/native).
-These may be services running on legacy infrastructure or managed cloud services running on
-infrastructure you do not control.
-
-Terminating gateways effectively act as egress proxies that can represent one or more services. They terminate service mesh
-mTLS connections, enforce Consul intentions, and forward requests to the appropriate destination.
-
-These gateways also simplify authorization from dynamic service addresses. Consul's intentions determine whether
-connections through the gateway are authorized. Then traditional tools like firewalls or IAM roles can authorize the
-connections from the known gateway nodes to the destination services.
-
-For more information about terminating gateways, review the [complete documentation](/consul/docs/connect/gateways/terminating-gateway)
-and the [terminating gateway tutorial](/consul/tutorials/developer-mesh/terminating-gateways-connect-external-services).
-
-
diff --git a/website/content/docs/connect/gateways/ingress-gateway/index.mdx b/website/content/docs/connect/gateways/ingress-gateway/index.mdx
deleted file mode 100644
index 3f0b4ea836f9..000000000000
--- a/website/content/docs/connect/gateways/ingress-gateway/index.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: docs
-page_title: Ingress gateway overview
-description: >-
- Ingress gateways enable you to connect external services to services in your mesh. Ingress gateways are a type of proxy that listens for requests from external network locations and route authorized traffic to destinations in the service mesh.
----
-
-# Ingress gateways overview
-
-An ingress gateway is a type of proxy that enables network connectivity from external services to services inside the mesh. The following diagram describes the ingress gateway workflow:
-
-
-
-
-
-Ingress gateway is deprecated and will not be enhanced beyond its current capabilities. Ingress gateway is fully supported
-in this version but will be removed in a future release of Consul.
-
-Consul's API gateway is the recommended alternative to ingress gateway.
-
-
-
-## Workflow
-
-The following stages describe how to add an ingress gateway to your service mesh:
-
-1. Configure ingress gateway listeners: Create an ingress gateway configuration entry and specify which services to expose to external requests. The configuration entry allows you to define what services should be exposed, on what port, and by what hostname. You can expose services registered with Consul or expose virtual services defined in other configuration entries. Refer to [Ingress gateway configuration entry reference](/consul/docs/connect/config-entries/ingress-gateway) for details on the configuration parameters you can specify.
-
-1. Define an ingress gateway proxy service: Ingress gateways are a special-purpose proxy service that you can define and register in a similar manner to other services. When you register the ingress gateway service, Consul applies the configurations defined in the ingress gateway configuration reference. Refer to [Implement an ingress gateway](/consul/docs/connect/gateways/ingress-gateway/usage) for additional information.
-
-1. Start the network proxy: The ingress gateway proxy service accepts configurations from the configuration entry and directs requests to the exposed services. When the external traffic passes through the ingress gateway, your sidecar proxy handles the inbound and outbound connections between the exposed services and the gateway. Refer to [Service mesh proxy overview](/consul/docs/connect/proxies) to learn more about the proxies Consul supports.
-
-## Integrations with custom TLS management solutions
-
-You can configure the ingress gateway to retrieve and serve custom TLS certificates from external systems. This functionality is designed to help you integrate with custom TLS management software. Refer to [Serve custom TLS certificates from an external service](/consul/docs/connect/gateways/ingress-gateway/tls-external-service) for additional information.
\ No newline at end of file
diff --git a/website/content/docs/connect/gateways/ingress-gateway/tls-external-service.mdx b/website/content/docs/connect/gateways/ingress-gateway/tls-external-service.mdx
deleted file mode 100644
index d3d116761831..000000000000
--- a/website/content/docs/connect/gateways/ingress-gateway/tls-external-service.mdx
+++ /dev/null
@@ -1,253 +0,0 @@
----
-layout: docs
-page_title: Serve custom TLS certificates from an external service
-description: Learn how to configure ingress gateways to serve TLS certificates from an external service to inbound traffic using the secret discovery service. The SDS feature is designed for developers building integrations with custom TLS management solutions.
----
-
-# Serve custom TLS certificates from an external service
-
-This is an advanced topic that describes how to configure ingress gateways to serve TLS certificates sourced from an external service to inbound traffic using secret discovery service (SDS). SDS is a low-level feature designed for developers building integrations with custom TLS management solutions. For instructions on more common ingress gateway implementations, refer to [Implement an ingress gateway](/consul/docs/connect/gateways/ingress-gateway/usage).
-
-## Overview
-
-The following process describes the general procedure for configuring ingress gateways to serve TLS certificates sourced from external services:
-
-1. Configure static SDS clusters in the ingress gateway service definition.
-1. Register the service definition.
-1. Configure TLS client authentication
-1. Start Envoy.
-1. Configure SDS settings in an ingress gateway configuration entry.
-1. Register the ingress gateway configuration entry with Consul.
-
-## Requirements
-
-- The external service must implement Envoy's [gRPC secret discovery service (SDS) API](https://www.envoyproxy.io/docs/envoy/latest/configuration/security/secret).
-- You should have some familiarity with Envoy configuration and the SDS protocol.
-- The [`connect.enabled` parameter](/consul/docs/agent/config/config-files#connect) must be set to `true` for all server agents in the Consul datacenter.
-- The [`ports.grpc` parameter](/consul/docs/agent/config/config-files#connect) must be configured for all server agents in the Consul datacenter.
-
-### ACL requirements
-
-If ACLs are enabled, you must present a token when registering ingress gateways that grant the following permissions:
-
-- `service:write` for the ingress gateway's service name
-- `service:read` for all services in the ingress gateway's configuration entry
-- `node:read` for all nodes of the services in the ingress gateway's configuration entry.
-
-These privileges authorize the token to route communications to other services in the mesh. If the Consul client agent on the gateway's node is not configured to use the default gRPC port, `8502`, then the gateway's token must also provide `agent:read` for its node's name in order to discover the agent's gRPC port. gRPC is used to expose Envoy's xDS API to Envoy proxies.
-
-## Configure static SDS clusters
-
-You must define one or more additional static clusters in the ingress gateway service definition for each Envoy proxy associated with the gateway. The additional clusters define how Envoy should connect to the required SDS services.
-
-Configure the static clusters in the [`Proxy.Config.envoy_extra_static_clusters_json`](/consul/docs/connect/proxies/envoy#envoy_extra_static_clusters_json) parameter in the service definition.
-
-The clusters must provide connection information and any necessary authentication information, such as mTLS credentials.
-
-You must manually register the ingress gateway with Consul proxy to define extra clusters in Envoy's bootstrap configuration. You cannot use the `-register` flag with `consul connect envoy -gateway=ingress` to automatically register the proxy to define static clusters.
-
-In the following example, the `public-ingress` gateway includes a static cluster named `sds-cluster` that specifies paths to the SDS certificate and SDS certification validation files:
-
-
-
-
-```hcl
-Services {
- Name = "public-ingress"
- Kind = "ingress-gateway"
-
- Proxy {
- Config {
- envoy_extra_static_clusters_json = <
-
-Refer to the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/v1.17.2/api-v3/config/bootstrap/v3/bootstrap.proto#envoy-v3-api-field-config-bootstrap-v3-bootstrap-staticresources-clusters) for details about configuration parameters for SDS clusters.
-
-## Register the ingress gateway service definition
-
-Issue the `consul services register` command on the Consul agent on the Envoy proxy's node to register the service. The following example command registers an ingress gateway proxy from a `public-ingress.hcl` file:
-
-```shell-session
-$ consul services register public-ingress.hcl
-```
-
-Refer to [Register services and health checks](/consul/docs/services/usage/register-services-checks) for additional information about registering services in Consul.
-
-## Configure TLS client authentication
-
-Store TLS client authentication files, certificate files, and keys on disk where the Envoy proxy runs and ensure that they are available to Consul. Refer to the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/bootstrap/bootstrap) for details on configuring authentication files.
-
-The following example specifies certificate chain:
-
-
-
-
-```json
-{
- "resources": [
- {
- "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret",
- "name": "tls_sds",
- "tls_certificate": {
- "certificate_chain": {
- "filename": "/certs/sds-client-auth.crt"
- },
- "private_key": {
- "filename": "/certs/sds-client-auth.key"
- }
- }
- }
- ]
-}
-```
-
-
-
-The following example specifies the validation context:
-
-
-
-```json
-{
- "resources": [
- {
- "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret",
- "name": "validation_context_sds",
- "validation_context": {
- "trusted_ca": {
- "filename": "/certs/sds-ca.crt"
- }
- }
- }
- ]
-}
-```
-
-
-
-## Start Envoy
-
-Issue the `consul connect envoy` command to bootstrap Envoy. The following example starts Envoy and registers it as a service called `public-ingress`:
-
-```shell-session
-$ consul connect envoy -gateway=ingress -service public-ingress
-```
-
-Refer to [Consul Connect Envoy](/consul/commands/connect/envoy) for additional information about using the `consul connect envoy` command.
-
-## Define an ingress gateway configuration entry
-
-Create an ingress gateway configuration entry that enables the gateway to use certificates from SDS. The configuration entry also maps downstream ingress listeners to upstream services. Configure the following fields:
-
-- [`Kind`](/consul/docs/connect/config-entries/ingress-gateway#kind): Set the value to `ingress-gateway`.
-- [`Name`](/consul/docs/connect/config-entries/ingress-gateway#name): Consul applies the configuration entry settings to ingress gateway proxies with names that match the `Name` field.
-- [`TLS`](/consul/docs/connect/config-entries/ingress-gateway#tls): The main `TLS` parameter for the configuration entry holds the SDS configuration. You can also specify TLS configurations per listener and per service.
- - [`TLS.SDS`](/consul/docs/connect/config-entries/ingress-gateway#tls-sds): The `SDS` map includes the following configuration settings:
- - [`ClusterName`](/consul/docs/connect/config-entries/ingress-gateway#tls-sds-clustername): Specifies the name of the cluster you specified when [configuring the SDS cluster](#configure-static-SDS-clusters).
- - [`CertResource`](/consul/docs/connect/config-entries/ingress-gateway#tls-sds-certresource): Specifies the name of the certificate resource to load.
-- [`Listeners`](/consul/docs/connect/config-entries/ingress-gateway#listeners): Specify one or more listeners.
- - [`Listeners.Port`](/consul/docs/connect/config-entries/ingress-gateway#listeners-port): Specify a port for the listener. Each listener is uniquely identified by its port number.
- - [`Listeners.Protocol`](/consul/docs/connect/config-entries/ingress-gateway#listeners-protocol): The default protocol is `tcp`, but you must specify the protocol used by the services you want to allow traffic from.
- - [`Listeners.Services`](/consul/docs/connect/config-entries/ingress-gateway#listeners-services): The `Services` field contains the services that you want to expose to upstream services. The field contains several options and sub-configurations that enable granular control over ingress traffic, such as health check and TLS configurations.
-
-For Consul Enterprise service meshes, you may also need to configure the [`Partition`](/consul/docs/connect/config-entries/ingress-gateway#partition) and [`Namespace`](/consul/docs/connect/config-entries/ingress-gateway#namespace) fields for the gateway and for each exposed service.
-
-Refer to [Ingress gateway configuration entry reference](/consul/docs/connect/config-entries/ingress-gateway) for details about the supported parameters.
-
-The following example directs Consul to retrieve `example.com-public-cert` certificates from an SDS cluster named `sds-cluster` and serve them to all listeners:
-
-
-
-```hcl
-Kind = "ingress-gateway"
-Name = "public-ingress"
-
-TLS {
- SDS {
- ClusterName = "sds-cluster"
- CertResource = "example.com-public-cert"
- }
-}
-
-Listeners = [
- {
- Port = 8443
- Protocol = "http"
- Services = ["*"]
- }
-]
-```
-
-
-
-## Register the ingress gateway configuration entry
-
-You can register the configuration entry using the [`consul config` command](/consul/commands/config) or by calling the [`/config` API endpoint](/consul/api-docs/config). Refer to [How to Use Configuration Entries](/consul/docs/agent/config-entries) for details about applying configuration entries.
-
-The following example registers an ingress gateway configuration entry named `public-ingress-cfg.hcl` that is stored on the local system:
-
-```shell-session
-$ consul config write public-ingress-cfg.hcl
-```
-
-The Envoy instance starts a listener on the port specified in the configuration entry and fetches the named TLS certificate from the SDS server.
diff --git a/website/content/docs/connect/gateways/ingress-gateway/usage.mdx b/website/content/docs/connect/gateways/ingress-gateway/usage.mdx
deleted file mode 100644
index 2b4c55e279a4..000000000000
--- a/website/content/docs/connect/gateways/ingress-gateway/usage.mdx
+++ /dev/null
@@ -1,127 +0,0 @@
----
-layout: docs
-page_title: Implement an ingress gateway
-description: Learn how to implement ingress gateways, which are Consul service mesh constructs that listen for requests from external network locations and route authorized traffic to destinations in the service mesh.
----
-
-# Implement an ingress gateway
-
-This topic describes how to add ingress gateways to your Consul service mesh. Ingress gateways enable connectivity within your organizational network by allowing services outside of the service mesh to send traffic to services in the mesh. Refer to [Ingress gateways overview](/consul/docs/connect/gateways/ingress-gateway/) for additional information about ingress gateways.
-
-This topic describes ingress gateway usage for virtual machine (VM) environments. Refer to [Configure ingress gateways for Consul on Kubernetes](/consul/docs/k8s/connect/ingress-gateways) for instructions on how to implement ingress gateways in Kubernetes environments.
-
-## Overview
-
-Ingress gateways are a type of proxy service included with Consul. Complete the following steps to set up an ingress gateway:
-
-1. Define listeners and the services they expose. Specify these details in an ingress gateway configuration entry.
-1. Register an ingress gateway service. Define the services in a service definition file.
-1. Start the ingress gateway. This step deploys the Envoy proxy that functions as the ingress gateway.
-
-After specifying listeners and services in the ingress gateway configuration entry, you can register the gateway service and start Envoy with a single CLI command instead of completing these steps separately. Refer to [Register an ingress service on Envoy startup](#register-an-ingress-service-on-envoy-startup).
-
-## Requirements
-
-- Service mesh must be enabled for all agents. Set the [`connect.enabled` parameter](/consul/docs/agent/config/config-files#connect) to `true` to enable service mesh.
-- The gRPC port must be configured for all server agents in the datacenter. Specify the gRPC port in the [`ports.grpc` parameter](/consul/docs/agent/config/config-files#grpc_port). We recommend setting the port to `8502` to simplify configuration when ACLs are enabled. Refer to [ACL requirements](#acl-requirements) for additional information.
-- You must use Envoy for sidecar proxies in your service mesh. Refer to [Envoy Proxy Configuration for Service Mesh](/consul/docs/connect/proxies/envoy) for supported versions.
-
-### ACL requirements
-
-If ACLs are enabled, you must present a token that grants the following permissions when registering ingress gateways:
-
-`service:write` for the ingress gateway's service name
-`service:read` for all services in the ingress gateway's configuration entry
-`node:read` for all nodes of the services in the ingress gateway's configuration entry.
-
-These privileges authorize the token to route communications to other services in the mesh. If the Consul client agent on the gateway's node is not configured to use the default `8502` gRPC port, then the gateway's token must also provide `agent:read` for its node's name in order to discover the agent's gRPC port. gRPC is used to expose Envoy's xDS API to Envoy proxies.
-
-## Expose services
-
-Define and apply an ingress gateway configuration entry to specify which services in the mesh to expose to external services.
-
-### Define an ingress gateway configuration entry
-
-Ingress gateway configuration entries map downstream ingress listeners to upstream services. When you register an ingress gateway proxy that matches the configuration entry name, Consul applies the settings specified in the configuration entry. Configure the following fields:
-
-- [`Kind`](/consul/docs/connect/config-entries/ingress-gateway#kind): Set the value to `ingress-gateway`.
-- [`Name`](/consul/docs/connect/config-entries/ingress-gateway#name): Consul applies the configuration entry settings to ingress gateway proxies with names that match the `Name` field.
-- [`Listeners`](/consul/docs/connect/config-entries/ingress-gateway#listeners): Specify one or more listeners.
- - [`Listeners.Port`](/consul/docs/connect/config-entries/ingress-gateway#listeners-port): Specify a port for the listener. Each listener is uniquely identified by its port number.
- - [`Listeners.Protocol`](/consul/docs/connect/config-entries/ingress-gateway#listeners-protocol): The default protocol is `tcp`, but you must specify the protocol used by the services you want to allow traffic from.
- - [`Listeners.Services`](/consul/docs/connect/config-entries/ingress-gateway#listeners-services): The `Services` field contains the services that you want to expose to upstream services. The field contains several options and sub-configurations that enable granular control over ingress traffic, such as health check and TLS configurations.
-
-For Consul Enterprise service meshes, you may also need to configure the [`Partition`](/consul/docs/connect/config-entries/ingress-gateway#partition) and [`Namespace`](/consul/docs/connect/config-entries/ingress-gateway#namespace) fields for the gateway and for each exposed service.
-
-Refer to [Ingress gateway configuration entry reference](/consul/docs/connect/config-entries/ingress-gateway) for details about the supported parameters.
-
-### Register an ingress gateway configuration entry
-
-You can register the configuration entry using the [`consul config` command](/consul/commands/config) or by calling the [`/config` API endpoint](/consul/api-docs/config). Refer to [How to Use Configuration Entries](/consul/docs/agent/config-entries) for details about applying configuration entries.
-
-The following example registers an ingress gateway configuration entry named `public-ingress.hcl` that is stored on the local system:
-
-```shell-session
-$ consul config write public-ingress.hcl
-```
-
-## Deploy an ingress gateway service
-
-To deploy an ingress gateway service, create a service definition and register it with Consul.
-
-You can also define an ingress gateway service and register it with Consul while starting an Envoy proxy from the command line. Refer to [Register an ingress service on Envoy startup](#register-an-ingress-service-on-envoy-startup) for details.
-
-### Create a service definition for the ingress gateway
-
-Consul applies the settings defined in the ingress gateway configuration entry to ingress gateway services that match the configuration entry name. Refer to [Define services](/consul/docs/services/usage/define-services) for additional information about defining services in Consul.
-
-The following fields are required for the ingress gateway service definition:
-
-- [`Kind`](/consul/docs/services/configuration/services-configuration-reference#kind): The field must be set to `ingress-gateway`.
-- [`Name`](/consul/docs/services/configuration/services-configuration-reference#name): The name should match the value specified for the `Name` field in the configuration entry.
-
-All other service definition fields are optional, but we recommend defining health checks to verify the health of the gateway. Refer to [Services configuration reference](/consul/docs/services/configuration/services-configuration-reference) for information about defining services.
-
-### Register the ingress gateway proxy service
-
-You can register the ingress gateway using API or CLI. Refer to [Register services and health checks](/consul/docs/services/usage/register-services-checks) for instructions on registering services in Consul.
-
-The following example registers an ingress gateway defined in `ingress-gateway.hcl` from the Consul CLI:
-
-```shell-session
-$ consul services register ingress-service.hcl
-```
-
-## Start an Envoy proxy
-
-Run the `consul connect envoy` command to start Envoy. Specify the name of the ingress gateway service and include the `-gateway=ingress` flag. Refer to [Consul Connect Envoy](/consul/commands/connect/envoy) for details about using the command.
-
-The following example starts Envoy for the `ingress-service` gateway service:
-
-```shell-session
-$ consul connect envoy -gateway=ingress ingress-service
-```
-
-### Register an ingress service on Envoy startup
-
-You can also automatically register the ingress gateway service when starting the Envoy proxy. Specify the following flags with the `consul connect envoy` command:
-
-- `-gateway=ingress`
-- `-register`
-- `-service=`
-
-The following example starts Envoy and registers an ingress gateway service named `ingress-service` bound to the agent address at port `8888`:
-
-```shell-session
-$ consul connect envoy -gateway=ingress -register \
- -service ingress-service \
- -address '{{ GetInterfaceIP "eth0" }}:8888'
-```
-You cannot register the ingress gateway service and start the proxy at the same time if you configure the gateway to retrieve and serve TLS certificates from their external downstreams. Refer to [Serve custom TLS certificates from an external service](/consul/docs/connect/gateways/ingress-gateway/tls-external-service) for more information.
-
-## Additional Envoy configurations
-
-Ingress gateways support additional Envoy gateway options and escape-hatch overrides. Specify gateway options in the ingress gateway service definition to use them. To use escape-hatch overrides, you must add them to your global proxy defaults configuration entry. Refer to the following documentation for additional information:
-
-- [Gateway options](/consul/docs/connect/proxies/envoy#gateway-options)
-- [Escape-hatch overrides](/consul/docs/connect/proxies/envoy#escape-hatch-overrides)
diff --git a/website/content/docs/connect/gateways/mesh-gateway/index.mdx b/website/content/docs/connect/gateways/mesh-gateway/index.mdx
deleted file mode 100644
index efb1bc1066e3..000000000000
--- a/website/content/docs/connect/gateways/mesh-gateway/index.mdx
+++ /dev/null
@@ -1,309 +0,0 @@
----
-layout: docs
-page_title: Mesh Gateways
-description: >-
- Mesh gateways are specialized proxies that route data between services that cannot communicate directly. Learn how mesh gateways are used in different Consul configurations.
----
-
-# Mesh Gateways
-
-Mesh gateways enable service mesh traffic to be routed between different Consul datacenters.
-Datacenters can reside in different clouds or runtime environments where general interconnectivity between all services in all datacenters isn't feasible.
-
-## Prerequisites
-
-Mesh gateways can be used with any of the following Consul configurations for managing separate datacenters or partitions.
-
-1. WAN Federation
- * [Mesh gateways can be used to route service-to-service traffic between datacenters](/consul/docs/connect/gateways/mesh-gateway/service-to-service-traffic-wan-datacenters)
- * [Mesh gateways can be used to route all WAN traffic, including from Consul servers](/consul/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways)
-2. Cluster Peering
- * [Mesh gateways can be used to route service-to-service traffic between datacenters](/consul/docs/connect/cluster-peering/usage/establish-cluster-peering)
- * [Mesh gateways can be used to route control-plane traffic from Consul servers](/consul/docs/connect/gateways/mesh-gateway/peering-via-mesh-gateways)
-3. Admin Partitions
- * [Mesh gateways can be used to route service-to-service traffic between admin partitions in the same Consul datacenter](/consul/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions)
-
-### Consul
-
-Review the [specific guide](#prerequisites) for your use case to determine the required version of Consul.
-
-### Network
-
-* General network connectivity to all services within its local Consul datacenter.
-* General network connectivity to all mesh gateways within remote Consul datacenters.
-
-### Proxy
-
-Envoy is the only proxy with mesh gateway capabilities in Consul.
-
-Mesh gateway proxies receive their configuration through Consul, which automatically generates it based on the proxy's registration.
-Consul can only translate mesh gateway registration information into Envoy configuration.
-
-Sidecar proxies that send traffic to an upstream service through a gateway need to know the location of that gateway. They discover the gateway based on their sidecar proxy registrations. Consul can only translate the gateway registration information into Envoy configuration.
-
-Sidecar proxies that do not send upstream traffic through a gateway are not affected when you deploy gateways. If you are using Consul's built-in proxy as a Connect sidecar it will continue to work for intra-datacenter traffic and will receive incoming traffic even if that traffic has passed through a gateway.
-
-## Configuration
-
-Configure the following settings to register the mesh gateway as a service in Consul.
-
-* Specify `mesh-gateway` in the `kind` field to register the gateway with Consul.
-* Configure the `proxy.upstreams` parameters to route traffic to the correct service, namespace, and datacenter. Refer to the [`upstreams` documentation](/consul/docs/connect/proxies/proxy-config-reference#upstream-configuration-reference) for details. The service `proxy.upstreams.destination_name` is always required. The `proxy.upstreams.datacenter` must be configured to enable cross-datacenter traffic. The `proxy.upstreams.destination_namespace` configuration is only necessary if the destination service is in a different namespace.
-* Define the `Proxy.Config` settings using opaque parameters compatible with your proxy (i.e., Envoy). For Envoy, refer to the [Gateway Options](/consul/docs/connect/proxies/envoy#gateway-options) and [Escape-hatch Overrides](/consul/docs/connect/proxies/envoy#escape-hatch-overrides) documentation for additional configuration information.
-* If ACLs are enabled, a token granting `service:write` for the gateway's service name and `service:read` for all services in the datacenter or partition must be added to the gateway's service definition. These permissions authorize the token to route communications for other Consul service mesh services, but do not allow decrypting any of their communications.
-
-### Modes
-
-Each upstream associated with a service mesh proxy can be configured so that it is routed through a mesh gateway.
-Depending on your network, the proxy's connection to the gateway can operate in one of the following modes:
-
-* `none` - No gateway is used and a service mesh sidecar proxy makes its outbound connections directly
- to the destination services. This is the default for WAN federation. This setting is invalid for peered clusters
- and will be treated as remote instead.
-
-* `local` - The service mesh sidecar proxy makes an outbound connection to a gateway running in the
- same datacenter. That gateway is responsible for ensuring that the data is forwarded to gateways in the destination datacenter.
-
-* `remote` - The service mesh sidecar proxy makes an outbound connection to a gateway running in the destination datacenter.
- The gateway forwards the data to the final destination service. This is the default for peered clusters.
-
-### Service Mesh Proxy Configuration
-
-Set the proxy to the preferred [mode](#modes) to configure the service mesh proxy. You can specify the mode globally or within child configurations to control proxy behaviors at a lower level. If the gateway mode is configured in multiple locations, Consul recognizes the following order of precedence:
-
-1. Upstream definition (highest priority)
-2. Service instance definition
-3. Centralized `service-defaults` configuration entry
-4. Centralized `proxy-defaults` configuration entry
-
-## Example Configurations
-
-Use the following example configurations to help you understand some of the common scenarios.
-
-### Enabling Gateways Globally
-
-The following `proxy-defaults` configuration will enable gateways for all mesh services in the `local` mode.
-
-
-
-```hcl
-Kind = "proxy-defaults"
-Name = "global"
-MeshGateway {
- Mode = "local"
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: ProxyDefaults
-metadata:
- name: global
-spec:
- meshGateway:
- mode: local
-```
-
-```json
-{
- "Kind": "proxy-defaults",
- "Name": "global",
- "MeshGateway": {
- "Mode": "local"
- }
-}
-```
-
-
-
-### Enabling Gateways Per Service
-
-The following `service-defaults` configuration will enable gateways for all mesh services with the name `web`.
-
-
-
-```hcl
-Kind = "service-defaults"
-Name = "web"
-MeshGateway {
- Mode = "local"
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: ServiceDefaults
-metadata:
- name: web
-spec:
- meshGateway:
- mode: local
-```
-
-```json
-{
- "Kind": "service-defaults",
- "Name": "web",
- "MeshGateway": {
- "Mode": "local"
- }
-}
-```
-
-
-
-### Enabling Gateways for a Service Instance
-
-The following [proxy service configuration](/consul/docs/connect/proxies/deploy-service-mesh-proxies)
- enables gateways for the service instance in the `remote` mode.
-
-
-
-```hcl
-service {
- name = "web-sidecar-proxy"
- kind = "connect-proxy"
- port = 8181
- proxy {
- destination_service_name = "web"
- mesh_gateway {
- mode = "remote"
- }
- upstreams = [
- {
- destination_name = "api"
- datacenter = "secondary"
- local_bind_port = 10000
- }
- ]
- }
-}
-
-# Or alternatively inline with the service definition:
-
-service {
- name = "web"
- port = 8181
- connect {
- sidecar_service {
- proxy {
- mesh_gateway {
- mode = "remote"
- }
- upstreams = [
- {
- destination_name = "api"
- datacenter = "secondary"
- local_bind_port = 10000
- }
- ]
- }
- }
- }
-}
-```
-
-```json
-{
- "service": {
- "kind": "connect-proxy",
- "name": "web-sidecar-proxy",
- "port": 8181,
- "proxy": {
- "destination_service_name": "web",
- "mesh_gateway": {
- "mode": "remote"
- },
- "upstreams": [
- {
- "destination_name": "api",
- "datacenter": "secondary",
- "local_bind_port": 10000
- }
- ]
- }
- }
-}
-```
-
-
-
-### Enabling Gateways for a Proxy Upstream
-
-The following service definition will enable gateways in the `local` mode for one upstream, the `remote` mode for a second upstream and will disable gateways for a third upstream.
-
-
-
-```hcl
-service {
- name = "web-sidecar-proxy"
- kind = "connect-proxy"
- port = 8181
- proxy {
- destination_service_name = "web"
- upstreams = [
- {
- destination_name = "api"
- destination_peer = "cluster-01"
- local_bind_port = 10000
- mesh_gateway {
- mode = "remote"
- }
- },
- {
- destination_name = "db"
- datacenter = "secondary"
- local_bind_port = 10001
- mesh_gateway {
- mode = "local"
- }
- },
- {
- destination_name = "logging"
- datacenter = "secondary"
- local_bind_port = 10002
- mesh_gateway {
- mode = "none"
- }
- },
- ]
- }
-}
-```
-```json
-{
- "service": {
- "kind": "connect-proxy",
- "name": "web-sidecar-proxy",
- "port": 8181,
- "proxy": {
- "destination_service_name": "web",
- "upstreams": [
- {
- "destination_name": "api",
- "local_bind_port": 10000,
- "mesh_gateway": {
- "mode": "remote"
- }
- },
- {
- "destination_name": "db",
- "local_bind_port": 10001,
- "mesh_gateway": {
- "mode": "local"
- }
- },
- {
- "destination_name": "logging",
- "local_bind_port": 10002,
- "mesh_gateway": {
- "mode": "none"
- }
- }
- ]
- }
- }
-}
-```
-
-
diff --git a/website/content/docs/connect/gateways/mesh-gateway/peering-via-mesh-gateways.mdx b/website/content/docs/connect/gateways/mesh-gateway/peering-via-mesh-gateways.mdx
deleted file mode 100644
index 97334950f62a..000000000000
--- a/website/content/docs/connect/gateways/mesh-gateway/peering-via-mesh-gateways.mdx
+++ /dev/null
@@ -1,139 +0,0 @@
----
-layout: docs
-page_title: Enabling Peering Control Plane Traffic
-description: >-
- Mesh gateways are specialized proxies that route data between services that cannot communicate directly. Learn how to enable traffic across clusters in different datacenters or admin partitions that have an established peering connection.
----
-
-# Enabling Peering Control Plane Traffic
-
-This topic describes how to configure a mesh gateway to route control plane traffic between Consul clusters that share a peer connection. For information about routing service traffic between cluster peers through a mesh gateway, refer to [Enabling Service-to-service Traffic Across Admin Partitions](/consul/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions).
-
-Control plane traffic between cluster peers includes
-the initial secret handshake and the bi-directional stream replicating peering data.
-This data is not decrypted by the mesh gateway(s).
-Instead, it is transmitted end-to-end using the accepting cluster’s auto-generated TLS certificate on the gRPC TLS port.
-
-
-
-
-[](/img/consul-connect/mesh-gateway/cluster-peering-connectivity-with-mesh-gateways.png)
-
-
-
-
-
-[](/img/consul-connect/mesh-gateway/cluster-peering-connectivity-without-mesh-gateways.png)
-
-
-
-
-## Prerequisites
-
-To configure mesh gateways for cluster peering control plane traffic, make sure your Consul environment meets the following requirements:
-
-- Consul version 1.14.0 or newer.
-- A local Consul agent in both clusters is required to manage mesh gateway configuration.
-- Use [Envoy proxies](/consul/docs/connect/proxies/envoy). Envoy is the only proxy with mesh gateway capabilities in Consul.
-
-## Configuration
-
-Configure the following settings to register and use the mesh gateway as a service in Consul.
-
-### Gateway registration
-
-Register a mesh gateway in each cluster that will be peered.
-
-- Specify `mesh-gateway` in the `kind` field to register the gateway with Consul.
-- Define the `Proxy.Config` settings using opaque parameters compatible with your proxy. For Envoy, refer to the [Gateway Options](/consul/docs/connect/proxies/envoy#gateway-options) and [Escape-hatch Overrides](/consul/docs/connect/proxies/envoy#escape-hatch-overrides) documentation for additional configuration information.
-- Apply a [Mesh config entry](/consul/docs/connect/config-entries/mesh#peer-through-mesh-gateways) with `PeerThroughMeshGateways = true`. See [modes](#modes) for a discussion of when to apply this.
-
-Alternatively, you can also use the CLI to spin up and register a gateway in Consul. For additional information, refer to the [`consul connect envoy` command](/consul/commands/connect/envoy#mesh-gateways).
-
-For Consul Enterprise clusters, mesh gateways must be registered in the "default" partition because this is implicitly where Consul servers are assigned.
-
-### ACL configuration
-
-
-
-
-In addition to the [ACL Configuration](/consul/docs/connect/cluster-peering/tech-specs#acl-specifications) necessary for service-to-service traffic, mesh gateways that route peering control plane traffic must be granted `peering:read` access to all peerings.
-
-This access allows the mesh gateway to list all peerings in a Consul cluster and generate unique routing per peered datacenter.
-
-
-
-```hcl
-peering = "read"
-```
-
-```json
-{
- "peering": "read"
-}
-```
-
-
-
-
-
-
-
-In addition to the [ACL Configuration](/consul/docs/connect/cluster-peering/tech-specs#acl-specifications) necessary for service-to-service traffic, mesh gateways that route peering control plane traffic must be granted `peering:read` access to all peerings in all partitions.
-
-This access allows the mesh gateway to list all peerings in a Consul cluster and generate unique routing per peered partition.
-
-
-
-```hcl
-partition_prefix "" {
- peering = "read"
-}
-```
-
-```json
-{
- "partition_prefix": {
- "": {
- "peering": "read"
- }
- }
-}
-```
-
-
-
-
-
-
-### Modes
-
-Connect proxy configuration [Modes](/consul/docs/connect/gateways/mesh-gateway#modes) are not applicable to peering control plane traffic.
-The flow of control plane traffic through the gateway is implied by the presence of a [Mesh config entry](/consul/docs/connect/config-entries/mesh#peer-through-mesh-gateways) with `PeerThroughMeshGateways = true`.
-
-
-
-```hcl
-Kind = "mesh"
-Peering {
- PeerThroughMeshGateways = true
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: Mesh
-metadata:
- name: mesh
-spec:
- peering:
- peerThroughMeshGateways: true
-```
-
-
-By setting this mesh config on a cluster before [creating a peering token](/consul/docs/connect/cluster-peering/usage/establish-cluster-peering#create-a-peering-token), inbound control plane traffic will be sent through the mesh gateway registered in this cluster, also known as the accepting cluster.
-As mesh gateway instances are registered at the accepting cluster, their addresses will be exposed to the dialing cluster over the bi-directional peering stream.
-
-Setting this mesh config on a cluster before [establishing a connection](/consul/docs/connect/cluster-peering/usage/establish-cluster-peering#establish-a-connection-between-clusters) will cause the outbound control plane traffic to flow through the mesh gateway.
-
-To route all peering control plane traffic though mesh gateways, both the accepting and dialing cluster must have the mesh config entry applied.
diff --git a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx b/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx
deleted file mode 100644
index 4c7fe3ba2aa1..000000000000
--- a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx
+++ /dev/null
@@ -1,292 +0,0 @@
----
-layout: docs
-page_title: Enabling Service-to-service Traffic Across Admin Partitions
-description: >-
- Mesh gateways are specialized proxies that route data between services that cannot communicate directly with upstreams. Learn how to enable service-to-service traffic across admin partitions and review example configuration entries.
----
-
-# Enabling Service-to-service Traffic Across Admin Partitions
-
--> **Consul Enterprise 1.11.0+:** Admin partitions are supported in Consul Enterprise versions 1.11.0 and newer.
-
-Mesh gateways enable you to route service mesh traffic between different Consul [admin partitions](/consul/docs/enterprise/admin-partitions).
-Partitions can reside in different clouds or runtime environments where general interconnectivity between all services
-in all partitions isn't feasible.
-
-Mesh gateways operate by sniffing and extracting the server name indication (SNI) header from the service mesh session and routing the connection to the appropriate destination based on the server name requested. The gateway does not decrypt the data within the mTLS session.
-
-## Prerequisites
-
-Ensure that your Consul environment meets the following requirements.
-
-### Consul
-
-* Consul Enterprise version 1.11.0 or newer.
-* A local Consul agent is required to manage its configuration.
-* Consul service mesh must be enabled in all partitions. Refer to the [`connect` documentation](/consul/docs/agent/config/config-files#connect) for details.
-* Each partition must have a unique name. Refer to the [admin partitions documentation](/consul/docs/enterprise/admin-partitions) for details.
-* If you want to [enable gateways globally](/consul/docs/connect/gateways/mesh-gateway#enabling-gateways-globally) you must enable [centralized configuration](/consul/docs/agent/config/config-files#enable_central_service_config).
-
-### Proxy
-
-Envoy is the only proxy with mesh gateway capabilities in Consul.
-
-Mesh gateway proxies receive their configuration through Consul, which automatically generates it based on the proxy's registration.
-Consul can only translate mesh gateway registration information into Envoy configuration.
-
-Sidecar proxies that send traffic to an upstream service through a gateway need to know the location of that gateway. They discover the gateway based on their sidecar proxy registrations. Consul can only translate the gateway registration information into Envoy configuration.
-
-Sidecar proxies that do not send upstream traffic through a gateway are not affected when you deploy gateways. If you are using Consul's built-in proxy as a service mesh sidecar it will continue to work for intra-datacenter traffic and will receive incoming traffic even if that traffic has passed through a gateway.
-
-## Configuration
-
-Configure the following settings to register the mesh gateway as a service in Consul.
-
-* Specify `mesh-gateway` in the `kind` field to register the gateway with Consul.
-* Configure the `proxy.upstreams` parameters to route traffic to the correct service, namespace, and partition. Refer to the [`upstreams` documentation](/consul/docs/connect/proxies/proxy-config-reference#upstream-configuration-reference) for details. The service `proxy.upstreams.destination_name` is always required. The `proxy.upstreams.destination_partition` must be configured to enable cross-partition traffic. The `proxy.upstreams.destination_namespace` configuration is only necessary if the destination service is in a different namespace.
-* Configure the `exported-services` configuration entry to enable Consul to export services contained in an admin partition to one or more additional partitions. Refer to the [Exported Services documentation](/consul/docs/connect/config-entries/exported-services) for details.
-* Define the `Proxy.Config` settings using opaque parameters compatible with your proxy, i.e., Envoy. For Envoy, refer to the [Gateway Options](/consul/docs/connect/proxies/envoy#gateway-options) and [Escape-hatch Overrides](/consul/docs/connect/proxies/envoy#escape-hatch-overrides) documentation for additional configuration information.
-* If ACLs are enabled, a token granting `service:write` for the gateway's service name and `service:read` for all services in the datacenter or partition must be added to the gateway's service definition. These permissions authorize the token to route communications for other Consul service mesh services, but do not allow decrypting any of their communications.
-
-### Modes
-
-Each upstream associated with a service mesh proxy can be configured so that it is routed through a mesh gateway.
-Depending on your network, the proxy's connection to the gateway can operate in one of the following modes:
-
-* `none` - (Default) No gateway is used and a service mesh connect proxy makes its outbound connections directly
- to the destination services.
-
-* `local` - The service mesh connect proxy makes an outbound connection to a gateway running in the same datacenter. The gateway at the outbound connection is responsible for ensuring that the data is forwarded to gateways in the destination partition.
-
-* `remote` - The service mesh connect proxy makes an outbound connection to a gateway running in the destination datacenter.
- The gateway forwards the data to the final destination service.
-
-### Service Mesh Proxy Configuration
-
-Set the proxy to the preferred [mode](#modes) to configure the service mesh proxy. You can specify the mode globally or within child configurations to control proxy behaviors at a lower level. If the gateway mode is configured in multiple locations, Consul recognizes the following order of precedence:
-
-1. Upstream definition (highest priority)
-2. Service instance definition
-3. Centralized `service-defaults` configuration entry
-4. Centralized `proxy-defaults` configuration entry
-
-## Example Configurations
-
-Use the following example configurations to help you understand some of the common scenarios.
-
-### Enabling Gateways Globally
-
-The following `proxy-defaults` configuration will enable gateways for all mesh services in the `local` mode.
-
-
-
-```hcl
-Kind = "proxy-defaults"
-Name = "global"
-MeshGateway {
- Mode = "local"
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: ProxyDefaults
-metadata:
- name: global
-spec:
- meshGateway:
- mode: local
-```
-
-```json
-{
- "Kind": "proxy-defaults",
- "Name": "global",
- "MeshGateway": {
- "Mode": "local"
- }
-}
-```
-
-
-
-### Enabling Gateways Per Service
-
-The following `service-defaults` configuration will enable gateways for all mesh services with the name `web`.
-
-
-
-```hcl
-Kind = "service-defaults"
-Name = "web"
-MeshGateway {
- Mode = "local"
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: ServiceDefaults
-metadata:
- name: web
-spec:
- meshGateway:
- mode: local
-```
-
-```json
-{
- "Kind": "service-defaults",
- "Name": "web",
- "MeshGateway": {
- "Mode": "local"
- }
-}
-```
-
-
-
-### Enabling Gateways for a Service Instance
-
-The following [proxy service configuration](/consul/docs/connect/proxies/deploy-service-mesh-proxies)
-enables gateways for `web` service instances in the `finance` partition.
-
-
-
-```hcl
-service {
- name = "web-sidecar-proxy"
- kind = "connect-proxy"
- port = 8181
- proxy {
- destination_service_name = "web"
- mesh_gateway {
- mode = "local"
- }
- upstreams = [
- {
- destination_partition = "finance"
- destination_namespace = "default"
- destination_type = "service"
- destination_name = "billing"
- local_bind_port = 9090
- }
- ]
- }
-}
-```
-
-```json
-{
- "service": {
- "kind": "connect-proxy",
- "name": "web-sidecar-proxy",
- "port": 8181,
- "proxy": {
- "destination_service_name": "web",
- "mesh_gateway": {
- "mode": "local"
- },
- "upstreams": [
- {
- "destination_name": "billing",
- "destination_namespace": "default",
- "destination_partition": "finance",
- "destination_type": "service",
- "local_bind_port": 9090
- }
- ]
- }
- }
-}
-```
-
-
-### Enabling Gateways for a Proxy Upstream
-
-The following service definition will enable gateways in `local` mode for three different partitions. Note that each service exists in the same namespace, but is separated by admin partition.
-
-
-
-```hcl
-service {
- name = "web-sidecar-proxy"
- kind = "connect-proxy"
- port = 8181
- proxy {
- destination_service_name = "web"
- upstreams = [
- {
- destination_name = "api"
- destination_namespace = "dev"
- destination_partition = "api"
- local_bind_port = 10000
- mesh_gateway {
- mode = "local"
- }
- },
- {
- destination_name = "db"
- destination_namespace = "dev"
- destination_partition = "db"
- local_bind_port = 10001
- mesh_gateway {
- mode = "local"
- }
- },
- {
- destination_name = "logging"
- destination_namespace = "dev"
- destination_partition = "logging"
- local_bind_port = 10002
- mesh_gateway {
- mode = "local"
- }
- },
- ]
- }
-}
-```
-
-```json
-{
- "service": {
- "kind": "connect-proxy",
- "name": "web-sidecar-proxy",
- "port": 8181,
- "proxy": {
- "destination_service_name": "web",
- "upstreams": [
- {
- "destination_name": "api",
- "destination_namespace": "dev",
- "destination_partition": "api",
- "local_bind_port": 10000,
- "mesh_gateway": {
- "mode": "local"
- }
- },
- {
- "destination_name": "db",
- "destination_namespace": "dev",
- "destination_partition": "db",
- "local_bind_port": 10001,
- "mesh_gateway": {
- "mode": "local"
- }
- },
- {
- "destination_name": "logging",
- "destination_namespace": "dev",
- "destination_partition": "logging",
- "local_bind_port": 10002,
- "mesh_gateway": {
- "mode": "local"
- }
- }
- ]
- }
- }
-}
-```
-
diff --git a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-wan-datacenters.mdx b/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-wan-datacenters.mdx
deleted file mode 100644
index d9df2de8f18c..000000000000
--- a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-wan-datacenters.mdx
+++ /dev/null
@@ -1,313 +0,0 @@
----
-layout: docs
-page_title: Enabling Service-to-service Traffic Across WAN Federated Datacenters
-description: >-
- Mesh gateways are specialized proxies that route data between services that cannot communicate directly. Learn how to enable service-to-service traffic across wan-federated datacenters and review example configuration entries.
----
-
-# Enabling Service-to-service Traffic Across WAN Federated Datacenters
-
--> **1.6.0+:** This feature is available in Consul versions 1.6.0 and newer.
-
-Mesh gateways enable service mesh traffic to be routed between different Consul datacenters.
-Datacenters can reside in different clouds or runtime environments where general interconnectivity between all services
-in all datacenters isn't feasible.
-
-Mesh gateways operate by sniffing and extracting the server name indication (SNI) header from the service mesh session and routing the connection to the appropriate destination based on the server name requested. The gateway does not decrypt the data within the mTLS session.
-
-The following diagram describes the architecture for using mesh gateways for cross-datacenter communication:
-
-
-
--> **Mesh Gateway Tutorial**: Follow the [mesh gateway tutorial](/consul/tutorials/developer-mesh/service-mesh-gateways) to learn important concepts associated with using mesh gateways for connecting services across datacenters.
-
-## Prerequisites
-
-Ensure that your Consul environment meets the following requirements.
-
-### Consul
-
-* Consul version 1.6.0 or newer.
-* A local Consul agent is required to manage its configuration.
-* Consul [service mesh](/consul/docs/agent/config/config-files#connect) must be enabled in both datacenters.
-* Each [datacenter](/consul/docs/agent/config/config-files#datacenter) must have a unique name.
-* Each datacenter must be [WAN joined](/consul/tutorials/networking/federation-gossip-wan).
-* The [primary datacenter](/consul/docs/agent/config/config-files#primary_datacenter) must be set to the same value in both datacenters. This specifies which datacenter is the authority for service mesh certificates and is required for services in all datacenters to establish mutual TLS with each other.
-* [gRPC](/consul/docs/agent/config/config-files#grpc_port) must be enabled.
-* If you want to [enable gateways globally](/consul/docs/connect/gateways/mesh-gateway/service-to-service-traffic-wan-datacenters#enabling-gateways-globally) you must enable [centralized configuration](/consul/docs/agent/config/config-files#enable_central_service_config).
-
-### Network
-
-* General network connectivity to all services within its local Consul datacenter.
-* General network connectivity to all mesh gateways within remote Consul datacenters.
-
-### Proxy
-
-Envoy is the only proxy with mesh gateway capabilities in Consul.
-
-Mesh gateway proxies receive their configuration through Consul, which automatically generates it based on the proxy's registration.
-Consul can only translate mesh gateway registration information into Envoy configuration.
-
-Sidecar proxies that send traffic to an upstream service through a gateway need to know the location of that gateway. They discover the gateway based on their sidecar proxy registrations. Consul can only translate the gateway registration information into Envoy configuration.
-
-Sidecar proxies that do not send upstream traffic through a gateway are not affected when you deploy gateways. If you are using Consul's built-in proxy as a service mesh sidecar it will continue to work for intra-datacenter traffic and will receive incoming traffic even if that traffic has passed through a gateway.
-
-## Configuration
-
-Configure the following settings to register the mesh gateway as a service in Consul.
-
-* Specify `mesh-gateway` in the `kind` field to register the gateway with Consul.
-* Configure the `proxy.upstreams` parameters to route traffic to the correct service, namespace, and datacenter. Refer to the [`upstreams` documentation](/consul/docs/connect/proxies/proxy-config-reference#upstream-configuration-reference) for details. The service `proxy.upstreams.destination_name` is always required. The `proxy.upstreams.datacenter` must be configured to enable cross-datacenter traffic. The `proxy.upstreams.destination_namespace` configuration is only necessary if the destination service is in a different namespace.
-* Define the `Proxy.Config` settings using opaque parameters compatible with your proxy (i.e., Envoy). For Envoy, refer to the [Gateway Options](/consul/docs/connect/proxies/envoy#gateway-options) and [Escape-hatch Overrides](/consul/docs/connect/proxies/envoy#escape-hatch-overrides) documentation for additional configuration information.
-* If ACLs are enabled, a token granting `service:write` for the gateway's service name and `service:read` for all services in the datacenter or partition must be added to the gateway's service definition. These permissions authorize the token to route communications for other Consul service mesh services, but do not allow decrypting any of their communications.
-
-### Modes
-
-Each upstream associated with a service mesh proxy can be configured so that it is routed through a mesh gateway.
-Depending on your network, the proxy's connection to the gateway can operate in one of the following modes (refer to the [mesh-architecture-diagram](#mesh-architecture-diagram)):
-
-* `none` - (Default) No gateway is used and a service mesh sidecar proxy makes its outbound connections directly
- to the destination services.
-
-* `local` - The service mesh sidecar proxy makes an outbound connection to a gateway running in the
- same datacenter. That gateway is responsible for ensuring that the data is forwarded to gateways in the destination datacenter.
- Refer to the flow labeled `local` in the [mesh-architecture-diagram](#mesh-architecture-diagram).
-
-* `remote` - The service mesh sidecar proxy makes an outbound connection to a gateway running in the destination datacenter.
- The gateway forwards the data to the final destination service.
- Refer to the flow labeled `remote` in the [mesh-architecture-diagram](#mesh-architecture-diagram).
-
-### Service Mesh Proxy Configuration
-
-Set the proxy to the preferred [mode](#modes) to configure the service mesh proxy. You can specify the mode globally or within child configurations to control proxy behaviors at a lower level. If the gateway mode is configured in multiple locations, Consul recognizes the following order of precedence:
-
-1. Upstream definition (highest priority)
-2. Service instance definition
-3. Centralized `service-defaults` configuration entry
-4. Centralized `proxy-defaults` configuration entry
-
-## Example Configurations
-
-Use the following example configurations to help you understand some of the common scenarios.
-
-### Enabling Gateways Globally
-
-The following `proxy-defaults` configuration will enable gateways for all mesh services in the `local` mode.
-
-
-
-```hcl
-Kind = "proxy-defaults"
-Name = "global"
-MeshGateway {
- Mode = "local"
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: ProxyDefaults
-metadata:
- name: global
-spec:
- meshGateway:
- mode: local
-```
-
-```json
-{
- "Kind": "proxy-defaults",
- "Name": "global",
- "MeshGateway": {
- "Mode": "local"
- }
-}
-```
-
-
-### Enabling Gateways Per Service
-
-The following `service-defaults` configuration will enable gateways for all mesh services with the name `web`.
-
-
-
-```hcl
-Kind = "service-defaults"
-Name = "web"
-MeshGateway {
- Mode = "local"
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: ServiceDefaults
-metadata:
- name: web
-spec:
- meshGateway:
- mode: local
-```
-
-```json
-{
- "Kind": "service-defaults",
- "Name": "web",
- "MeshGateway": {
- "Mode": "local"
- }
-}
-```
-
-
-
-### Enabling Gateways for a Service Instance
-
-The following [proxy service configuration](/consul/docs/connect/proxies/deploy-service-mesh-proxies)
-enables gateways for the service instance in the `remote` mode.
-
-
-
-```hcl
-service {
- name = "web-sidecar-proxy"
- kind = "connect-proxy"
- port = 8181
- proxy {
- destination_service_name = "web"
- mesh_gateway {
- mode = "remote"
- }
- upstreams = [
- {
- destination_name = "api"
- datacenter = "secondary"
- local_bind_port = 10000
- }
- ]
- }
-}
-
-# Or alternatively inline with the service definition:
-
-service {
- name = "web"
- port = 8181
- connect {
- sidecar_service {
- proxy {
- mesh_gateway {
- mode = "remote"
- }
- upstreams = [
- {
- destination_name = "api"
- datacenter = "secondary"
- local_bind_port = 10000
- }
- ]
- }
- }
- }
-}
-```
-
-```json
-{
- "service": {
- "kind": "connect-proxy",
- "name": "web-sidecar-proxy",
- "port": 8181,
- "proxy": {
- "destination_service_name": "web",
- "mesh_gateway": {
- "mode": "remote"
- },
- "upstreams": [
- {
- "destination_name": "api",
- "datacenter": "secondary",
- "local_bind_port": 10000
- }
- ]
- }
- }
-}
-```
-
-
-
-### Enabling Gateways for a Proxy Upstream
-
-The following service definition will enable gateways in the `local` mode for one upstream, the `remote` mode for a second upstream and will disable gateways for a third upstream.
-
-
-
-```hcl
-service {
- name = "web-sidecar-proxy"
- kind = "connect-proxy"
- port = 8181
- proxy {
- destination_service_name = "web"
- upstreams = [
- {
- destination_name = "api"
- local_bind_port = 10000
- mesh_gateway {
- mode = "remote"
- }
- },
- {
- destination_name = "db"
- local_bind_port = 10001
- mesh_gateway {
- mode = "local"
- }
- },
- {
- destination_name = "logging"
- local_bind_port = 10002
- mesh_gateway {
- mode = "none"
- }
- },
- ]
- }
-}
-```
-```json
-{
- "service": {
- "kind": "connect-proxy",
- "name": "web-sidecar-proxy",
- "port": 8181,
- "proxy": {
- "destination_service_name": "web",
- "upstreams": [
- {
- "destination_name": "api",
- "local_bind_port": 10000,
- "mesh_gateway": {
- "mode": "remote"
- }
- },
- {
- "destination_name": "db",
- "local_bind_port": 10001,
- "mesh_gateway": {
- "mode": "local"
- }
- },
- {
- "destination_name": "logging",
- "local_bind_port": 10002,
- "mesh_gateway": {
- "mode": "none"
- }
- }
- ]
- }
- }
-}
-```
-
diff --git a/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx b/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx
deleted file mode 100644
index d637a8f13461..000000000000
--- a/website/content/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx
+++ /dev/null
@@ -1,200 +0,0 @@
----
-layout: docs
-page_title: Enabling WAN Federation Control Plane Traffic
-description: >-
- You can use mesh gateways to simplify the networking requirements for WAN federated Consul datacenters. Mesh gateways reduce cross-datacenter connection paths, ports, and communication protocols.
----
-
-# Enabling WAN Federation Control Plane Traffic
-
--> **1.8.0+:** This feature is available in Consul versions 1.8.0 and higher
-
-~> This topic requires familiarity with [mesh gateways](/consul/docs/connect/gateways/mesh-gateway/service-to-service-traffic-wan-datacenters).
-
-WAN federation via mesh gateways allows for Consul servers in different datacenters
-to be federated exclusively through mesh gateways.
-
-When setting up a
-[multi-datacenter](/consul/tutorials/networking/federation-gossip-wan)
-Consul cluster, operators must ensure that all Consul servers in every
-datacenter must be directly connectable over their WAN-advertised network
-address from each other.
-
-[](/img/wan-federation-connectivity-traditional.png)
-
-This requires that operators setting up the virtual machines or containers
-hosting the servers take additional steps to ensure the necessary routing and
-firewall rules are in place to allow the servers to speak to each other over
-the WAN.
-
-Sometimes this prerequisite is difficult or undesirable to meet:
-
-- **Difficult:** The datacenters may exist in multiple Kubernetes clusters that
- unfortunately have overlapping pod IP subnets, or may exist in different
- cloud provider VPCs that have overlapping subnets.
-
-- **Undesirable:** Network security teams may not approve of granting so many
- firewall rules. When using platform autoscaling, keeping rules up to date becomes untenable.
-
-Operators looking to simplify their WAN deployment and minimize the exposed
-security surface area can elect to join these datacenters together using [mesh
-gateways](/consul/docs/connect/gateways/mesh-gateway/service-to-service-traffic-wan-datacenters) to do so.
-
-[](/img/wan-federation-connectivity-mesh-gateways.png)
-
-## Architecture
-
-There are two main kinds of communication that occur over the WAN link spanning
-the gulf between disparate Consul datacenters:
-
-- **WAN gossip:** We leverage the serf and memberlist libraries to gossip
- around failure detector knowledge about Consul servers in each datacenter.
- By default this operates point to point between servers over `8302/udp` with
- a fallback to `8302/tcp` (which logs a warning indicating the network is
- misconfigured).
-
-- **Cross-datacenter RPCs:** Consul servers expose a special multiplexed port
- over `8300/tcp`. Several distinct kinds of messages can be received on this
- port, such as RPC requests forwarded from servers in other datacenters.
-
-In this network topology individual Consul client agents on a LAN in one
-datacenter never need to directly dial servers in other datacenters. This
-means you could introduce a set of firewall rules prohibiting `10.0.0.0/24`
-from sending any traffic at all to `10.1.2.0/24` for security isolation.
-
-You may already have configured [mesh
-gateways](/consul/tutorials/developer-mesh/service-mesh-gateways)
-to allow for services in the service mesh to freely connect between datacenters
-regardless of the lateral connectivity of the nodes hosting the Consul client
-agents.
-
-By activating WAN federation via mesh gateways the servers
-can similarly use the existing mesh gateways to reach each other without
-themselves being directly reachable.
-
-## Configuration
-
-### TLS
-
-All Consul servers in all datacenters should have TLS configured with certificates containing
-these SAN fields:
-
- server.<datacenter>.<domain> (normal)
- <node_name>.server.<datacenter>.<domain> (needed for wan federation)
-
-This can be achieved using any number of tools, including `consul tls cert create` with the `-node` flag.
-
-### Mesh Gateways
-
-There needs to be at least one mesh gateway configured to opt-in to exposing
-the servers in its configuration. When using the `consul connect envoy` CLI
-this is done by using the flag `-expose-servers`. All this does is to register
-the mesh gateway into the catalog with the additional piece of service metadata
-of `{"consul-wan-federation":"1"}`. If you are registering the mesh gateways
-into the catalog out of band you may simply add this to your existing
-registration payload.
-
-!> Before activating the feature on an existing cluster you should ensure that
-there is at least one mesh gateway prepared to expose the servers registered in
-each datacenter; otherwise, the WAN will become only partly connected.
-
-### Consul Server Options
-
-There are a few necessary additional pieces of configuration beyond those
-required for standing up a
-[multi-datacenter](/consul/tutorials/networking/federation-gossip-wan)
-Consul cluster.
-
-Consul servers in the _primary_ datacenter should add this snippet to the
-configuration file:
-
-```hcl
-connect {
- enabled = true
- enable_mesh_gateway_wan_federation = true
-}
-```
-
-Consul servers in all _secondary_ datacenters should add this snippet to the
-configuration file:
-
-```hcl
-primary_gateways = [ "<primary-mesh-gateway-ip>:<primary-mesh-gateway-port>", ... ]
-connect {
- enabled = true
- enable_mesh_gateway_wan_federation = true
-}
-```
-
-The [`retry_join_wan`](/consul/docs/agent/config/config-files#retry_join_wan) addresses are
-only used for the [traditional federation process](/consul/docs/k8s/deployment-configurations/multi-cluster#traditional-wan-federation).
-They must be omitted when federating Consul servers via gateways.
-
--> The `primary_gateways` configuration can also use `go-discover` syntax just
-like `retry_join_wan`.
-
-### Bootstrapping
-
-For ease of debugging (such as avoiding a flurry of misleading error messages)
-when intending to activate WAN federation via mesh gateways it is best to
-follow this general procedure:
-
-### New secondary
-
-1. Upgrade to the desired version of the consul binary for all servers,
- clients, and CLI.
-2. Start all consul servers and clients on the new version in the primary
- datacenter.
-3. Ensure the primary datacenter has at least one running, registered mesh gateway with
- the service metadata key of `{"consul-wan-federation":"1"}` set.
-4. Ensure you are _prepared_ to launch corresponding mesh gateways in all
- secondaries. When ACLs are enabled actually registering these requires
- upstream connectivity to the primary datacenter to authorize catalog
- registration.
-5. Ensure all servers in the primary datacenter have updated configuration and
- restart.
-6. Ensure all servers in the secondary datacenter have updated configuration.
-7. Start all consul servers and clients on the new version in the secondary
- datacenter.
-8. When ACLs are enabled, shortly afterwards it should become possible to
- resolve ACL tokens from the secondary, at which time it should be possible
- to launch the mesh gateways in the secondary datacenter.
-
-### Existing secondary
-
-1. Upgrade to the desired version of the consul binary for all servers,
- clients, and CLI.
-2. Restart all consul servers and clients on the new version.
-3. Ensure each datacenter has at least one running, registered mesh gateway with the
- service metadata key of `{"consul-wan-federation":"1"}` set.
-4. Ensure all servers in the primary datacenter have updated configuration and
- restart.
-5. Ensure all servers in the secondary datacenter have updated configuration and
- restart.
-
-### Verification
-
-From any two datacenters joined together double check the following give you an
-expected result:
-
-- Check that `consul members -wan` lists all servers in all datacenters with
- their _local_ ip addresses and are listed as `alive`.
-
-- Ensure any API request that activates datacenter request forwarding, such as
-  [`/v1/catalog/services?dc=<other_datacenter_name>`](/consul/api-docs/catalog#dc-1),
-  succeeds.
-
-### Upgrading the primary gateways
-
-Once federation is established, secondary datacenters will continuously request
-updated mesh gateway addresses from the primary datacenter. Consul routes the requests
- through the primary datacenter's mesh gateways. This is because
-secondary datacenters cannot directly dial the primary datacenter's Consul servers.
-If the primary gateways are upgraded, and their previous instances are decommissioned
-before the updates are propagated, then the primary datacenter will become unreachable.
-
-To safely upgrade primary gateways, we recommend that you apply one of the following policies:
-- Avoid decommissioning primary gateway IP addresses. This is because the [primary_gateways](/consul/docs/agent/config/config-files#primary_gateways) addresses configured on the secondary servers act as a fallback mechanism for re-establishing connectivity to the primary.
-
-- Verify that addresses of the new mesh gateways in the primary were propagated
-to the secondary datacenters before decommissioning the old mesh gateways in the primary.
diff --git a/website/content/docs/connect/gateways/terminating-gateway.mdx b/website/content/docs/connect/gateways/terminating-gateway.mdx
deleted file mode 100644
index 86014850eb20..000000000000
--- a/website/content/docs/connect/gateways/terminating-gateway.mdx
+++ /dev/null
@@ -1,130 +0,0 @@
----
-layout: docs
-page_title: Terminating Gateway | Service Mesh
-description: >-
- Terminating gateways send requests from inside the service mesh to external network locations and services outside the mesh. Learn about requirements and terminating gateway interactions with Consul's service catalog.
----
-
-# Terminating Gateways
-
--> **1.8.0+:** This feature is available in Consul versions 1.8.0 and newer.
-
-Terminating gateways enable connectivity within your organizational network from services in the Consul service mesh to
-services and [destinations](/consul/docs/connect/config-entries/service-defaults#terminating-gateway-destination) outside the mesh. These gateways effectively act as service mesh proxies that can
-represent more than one service. They terminate service mesh mTLS connections, enforce intentions,
-and forward requests to the appropriate destination.
-
-
-
-For additional use cases and usage patterns, review the tutorial for
-[understanding terminating gateways](/consul/tutorials/developer-mesh/service-mesh-terminating-gateways?utm_source=docs).
-
-~> **Known limitations:** Terminating gateways currently do not support targeting service subsets with
-[L7 configuration](/consul/docs/connect/manage-traffic). They route to all instances of a service with no capabilities
-for filtering by instance.
-
-## Security Considerations
-
-~> We recommend that terminating gateways are not exposed to the WAN or open internet. This is because terminating gateways
-hold certificates to decrypt Consul service mesh traffic directed at them and may be configured with credentials to connect
-to linked services. Connections over the WAN or open internet should flow through [mesh gateways](/consul/docs/connect/gateways/mesh-gateway)
-whenever possible since they are not capable of decrypting traffic or connecting directly to services.
-
-By specifying a path to a [CA file](/consul/docs/connect/config-entries/terminating-gateway#cafile) connections
-from the terminating gateway will be encrypted using one-way TLS authentication. If a path to a
-[client certificate](/consul/docs/connect/config-entries/terminating-gateway#certfile)
-and [private key](/consul/docs/connect/config-entries/terminating-gateway#keyfile) are also specified connections
-from the terminating gateway will be encrypted using mutual TLS authentication.
-
-If none of these are provided, Consul will **only** encrypt connections to the gateway and not
-from the gateway to the destination service.
-
-When certificates for linked services are rotated, the gateway must be restarted to pick up the new certificates from disk.
-To avoid downtime, perform a rolling restart to reload the certificates. Registering multiple terminating gateway instances
-with the same [name](/consul/commands/connect/envoy#service) provides additional fault tolerance
-as well as the ability to perform rolling restarts.
-
--> **Note:** If certificates and keys are configured the terminating gateway will upgrade HTTP connections to TLS.
-Client applications can issue plain HTTP requests even when connecting to servers that require HTTPS.
-
-## Prerequisites
-
-Each terminating gateway needs:
-
-1. A local Consul client agent to manage its configuration.
-2. General network connectivity to services within its local Consul datacenter.
-3. General network connectivity to services and destinations outside the mesh that are part of the gateway services list.
-
-Terminating gateways also require that your Consul datacenters are configured correctly:
-
-- You'll need to use Consul version 1.8.0 or newer.
-- Consul [service mesh](/consul/docs/agent/config/config-files#connect) must be enabled on the datacenter's Consul servers.
-- [gRPC](/consul/docs/agent/config/config-files#grpc_port) must be enabled on all client agents.
-
-Currently, [Envoy](https://www.envoyproxy.io/) is the only proxy with terminating gateway capabilities in Consul.
-
-- Terminating gateway proxies receive their configuration through Consul, which
- automatically generates it based on the gateway's registration. Currently Consul
- can only translate terminating gateway registration information into Envoy
- configuration, therefore the proxies acting as terminating gateways must be Envoy.
-
-Service mesh proxies that send upstream traffic through a gateway aren't
-affected when you deploy terminating gateways. If you are using non-Envoy proxies as
-Service mesh proxies they will continue to work for traffic directed at services linked to
-a terminating gateway as long as they discover upstreams with the
-[/health/connect](/consul/api-docs/health#list-nodes-for-connect-capable-service) endpoint.
-
-## Running and Using a Terminating Gateway
-
-For a complete example of how to enable connections from services in the Consul service mesh to
-services outside the mesh, review the [terminating gateway tutorial](/consul/tutorials/developer-mesh/terminating-gateways-connect-external-services).
-
-## Terminating Gateway Configuration
-
-Terminating gateways are configured in service definitions and registered with Consul like other services, with two exceptions.
-The first is that the [kind](/consul/api-docs/agent/service#kind) must be "terminating-gateway". Second,
-the terminating gateway service definition may contain a `Proxy.Config` entry just like a
-service mesh proxy service, to define opaque configuration parameters useful for the actual proxy software.
-For Envoy there are some supported [gateway options](/consul/docs/connect/proxies/envoy#gateway-options) as well as
-[escape-hatch overrides](/consul/docs/connect/proxies/envoy#escape-hatch-overrides).
-
--> **Note:** If ACLs are enabled, terminating gateways must be registered with a token granting `node:read` on the nodes
-of all services in its configuration entry. The token must also grant `service:write` for the terminating gateway's service name **and**
-the names of all services in the terminating gateway's configuration entry. These privileges will authorize the gateway
-to terminate mTLS connections on behalf of the linked services and then route the traffic to its final destination.
-If the Consul client agent on the gateway's node is not configured to use the default gRPC port, 8502, then the gateway's token
-must also provide `agent:read` for its node's name in order to discover the agent's gRPC port. gRPC is used to expose Envoy's xDS API to Envoy proxies.
-
-You can link services and destinations to a terminating gateway with a `terminating-gateway`
-[configuration entry](/consul/docs/connect/config-entries/terminating-gateway). This config entry can be applied via the
-[CLI](/consul/commands/config/write) or [API](/consul/api-docs/config#apply-configuration).
-
-Gateways with the same name in Consul's service catalog are configured with a single configuration entry.
-This means that additional gateway instances registered with the same name will determine their routing based on the existing configuration entry.
-Adding replicas of a gateway that routes to a particular set of services requires running the
-[envoy subcommand](/consul/commands/connect/envoy#terminating-gateways) on additional hosts and specifying
-the same gateway name with the `service` flag.
-
-~> [Configuration entries](/consul/docs/agent/config-entries) are global in scope. A configuration entry for a gateway name applies
-across all federated Consul datacenters. If terminating gateways in different Consul datacenters need to route to different
-sets of services within their datacenter then the terminating gateways **must** be registered with different names.
-
-The services that the terminating gateway will proxy for must be registered with Consul, even the services outside the mesh. They must also be registered
-in the same Consul datacenter as the terminating gateway. Otherwise the terminating gateway will not be able to
-discover the services' addresses. These services can be registered with a local Consul agent.
-If there is no agent present, the services can be registered [directly in the catalog](/consul/api-docs/catalog#register-entity)
-by sending the registration request to a client or server agent on a different host.
-
-All services registered in the Consul catalog must be associated with a node, even when their node is
-not managed by a Consul client agent. All agent-less services with the same address can be registered under the same node name and address.
-However, ensure that the [node name](/consul/api-docs/catalog#node) for external services registered directly in the catalog
-does not match the node name of any Consul client agent node. If the node name overlaps with the node name of a Consul client agent,
-Consul's [anti-entropy sync](/consul/docs/architecture/anti-entropy) will delete the services registered via the `/catalog/register` HTTP API endpoint.
-
-Service-defaults [destinations](/consul/docs/connect/config-entries/service-defaults#destination) let you
-define endpoints external to the mesh and routable through a terminating gateway in transparent mode.
-After you define a service-defaults configuration entry for each destination, you can use the service-default name as part of the terminating gateway services list.
-If a service and a destination service-defaults have the same name, the terminating gateway will use the service.
-
-For a complete example of how to register external services review the
-[external services tutorial](/consul/tutorials/developer-discovery/service-registration-external-services).
diff --git a/website/content/docs/connect/index.mdx b/website/content/docs/connect/index.mdx
index 6bdc9989f52b..3531850d87b9 100644
--- a/website/content/docs/connect/index.mdx
+++ b/website/content/docs/connect/index.mdx
@@ -1,77 +1,50 @@
---
layout: docs
-page_title: Service Mesh on Consul
+page_title: Connect workloads to Consul service mesh
description: >-
- Consul’s service mesh makes application and microservice networking secure and observable with identity-based authentication, mutual TLS (mTLS) encryption, and explicit service-to-service authorization enforced by sidecar proxies. Learn how Consul’s service mesh works and get started on VMs or Kubernetes.
+ Consul's service mesh makes application and microservice networking secure and observable with identity-based authentication, mutual TLS (mTLS) encryption, and explicit service-to-service authorization enforced by sidecar proxies. Learn how to enable and configure Consul's service mesh and proxies.
---
-# Consul service mesh
-
-Consul service mesh provides service-to-service connection authorization and
-encryption using mutual Transport Layer Security (TLS).
-
-Applications can use [sidecar proxies](/consul/docs/connect/proxies) in a service mesh configuration to
-establish TLS connections for inbound and outbound connections without being aware of the service mesh at all.
-Applications may also [natively integrate with Consul service mesh](/consul/docs/connect/native) for optimal performance and security.
-Consul service mesh can help you secure your services and provide data about service-to-service communications.
-
-The noun _connect_ is used throughout this documentation to refer to the connect
-subsystem that provides Consul's service mesh capabilities.
-Where you encounter the _noun_ connect, it is usually functionality specific to
-service mesh.
-
-Review the video below to learn more about Consul service mesh from HashiCorp's co-founder Armon.
-
-
-
-## Application security
-
-Consul service mesh enables secure deployment best-practices with automatic
-service-to-service encryption, and identity-based authorization.
-Consul uses the registered service identity, rather than IP addresses, to
-enforce access control with [intentions](/consul/docs/connect/intentions). This
-makes it easier to control access and enables services to be
-rescheduled by orchestrators, including Kubernetes and Nomad. Intention
-enforcement is network agnostic, so Consul service mesh works with physical networks, cloud
-networks, software-defined networks, cross-cloud, and more.
-
-## Observability
-
-One of the key benefits of Consul service mesh is the uniform and consistent view it can
-provide of all the services on your network, irrespective of their different
-programming languages and frameworks. When you configure Consul service mesh to use
-sidecar proxies, those proxies see all service-to-service traffic and can
-collect data about it. Consul service mesh can configure Envoy proxies to collect
-layer 7 metrics and export them to tools like Prometheus. Correctly instrumented
-applications can also send open tracing data through Envoy.
-
-## Getting started with Consul service mesh
-
-Complete the following tutorials to try Consul service mesh in different environments:
-
-- The [Getting Started with Consul Service Mesh collection](/consul/tutorials/kubernetes-deploy/service-mesh?utm_source=docs)
- walks you through installing Consul as service mesh for Kubernetes using the Helm
- chart, deploying services in the service mesh, and using intentions to secure service
- communications.
-
-- The [Getting Started With Consul for Kubernetes](/consul/tutorials/get-started-kubernetes?utm_source=docs) tutorials guide you through installing Consul on Kubernetes to set up a service mesh for establishing communication between Kubernetes services.
-
-- The [Secure Service-to-Service Communication tutorial](/consul/tutorials/developer-mesh/service-mesh-with-envoy-proxy?utm_source=docs)
- is a simple walk through of connecting two services on your local machine
- and configuring your first intention.
-
-- The [Kubernetes tutorial](/consul/tutorials/kubernetes/kubernetes-minikube?utm_source=docs)
- walks you through configuring Consul service mesh in Kubernetes using the Helm
- chart, and using intentions. You can run the guide on Minikube or an existing
- Kubernetes cluster.
-
-- The [observability tutorial](/consul/tutorials/kubernetes/kubernetes-layer7-observability)
- shows how to deploy a basic metrics collection and visualization pipeline on
- a Minikube or Kubernetes cluster using the official Helm charts for Consul,
- Prometheus, and Grafana.
+# Connect workloads to Consul service mesh
+
+This page provides an overview of Consul's service mesh features and their configuration. Service mesh is enabled by default on Consul server agents.
+
+## Introduction
+
+
+
+In addition to the service discovery operations available to the Consul instance
+that runs on the same node as your workload, you can use Consul to deploy Envoy
+sidecar proxies to control traffic between each service and the rest of the
+network. Consul includes a built-in certificate authority that can enforce mTLS
+encryption between sidecar proxies. Use [Consul configuration entries](/consul/docs/fundamentals/config-entry) to further secure and monitor
+service-to-service communication.
+
+## Service mesh configuration
+
+The `connect` block of a Consul server agent contains the configurations for the CA provider and locality information for the node. Refer to [Service mesh parameters](/consul/docs/reference/agent/configuration-file/service-mesh) for more information.
+
+To learn how to turn the service mesh off or back on again, refer to [enable service mesh](/consul/docs/connect/enable).
+
+## Envoy proxies
+
+Consul includes built-in support for Envoy proxies to manage service mesh operations. Configure behavior for individual proxies, or configure default behavior for proxies according to service identity. For more information about proxies and their specialized operations in the service mesh, refer to [Service mesh proxy overview](/consul/docs/connect/proxy).
+
+## Guidance
+
+Runtime-specific guidance is also available:
+
+- [Connect workloads to service mesh on VMs](/consul/docs/connect/vm)
+- [Connect workloads to service mesh on Kubernetes](/consul/docs/connect/k8s)
+- [Connect workloads to service mesh on ECS](/consul/docs/connect/ecs)
+- [Connect Consul service mesh to AWS Lambda](/consul/docs/connect/lambda)
+- [Connect workloads to service mesh on Nomad](/consul/docs/connect/nomad)
+
+## Debug and troubleshoot
+
+If you experience errors when connecting Consul's service mesh to your workloads, refer to the following resources:
+
+- [Consul service mesh troubleshooting overview](/consul/docs/connect/troubleshoot)
+- [Debug Consul service mesh](/consul/docs/connect/troubleshoot/debug)
+- [Troubleshoot service-to-service
+ communication](/consul/docs/connect/troubleshoot/service-to-service)
diff --git a/website/content/docs/connect/intentions/create-manage-intentions.mdx b/website/content/docs/connect/intentions/create-manage-intentions.mdx
deleted file mode 100644
index 46ec146824fb..000000000000
--- a/website/content/docs/connect/intentions/create-manage-intentions.mdx
+++ /dev/null
@@ -1,178 +0,0 @@
----
-layout: docs
-page_title: Create and manage service intentions
-description: >-
- Learn how to create and manage Consul service mesh intentions using service-intentions config entries, the `consul intentions` command, and `/connect/intentions` API endpoint.
----
-
-# Create and manage intentions
-
-This topic describes how to create and manage service intentions, which are configurations for controlling access between services in the service mesh.
-
-## Overview
-
-You can create single intentions or create them in batches using the Consul API, CLI, or UI. You can also define a service intention configuration entry that sets default intentions for all services in the mesh. Refer to [Service intentions overview](/consul/docs/connect/intentions/) for additional background information about intentions.
-
-## Requirements
-
-- At least two services must be registered in the datacenter.
-- TLS must be enabled to enforce L4 intentions. Refer to [Encryption](/consul/docs/security/encryption) for additional information.
-
-### ACL requirements
-
-Consul grants permissions for creating and managing intentions based on the destination, not the source. When ACLs are enabled, services and operators must present a token linked to a policy that grants read and write permissions to the destination service.
-
-Consul implicitly grants `intentions:read` permissions to destination services when they are configured with `service:read` or `service:write` permissions. This is so that the services can allow or deny inbound connections when they attempt to join the service mesh. Refer to [Service rules](/consul/docs/security/acl/acl-rules#service-rules) for additional information about configuring ACLs for intentions.
-
-The default ACL policy configuration determines the default behavior for intentions. If the policy is set to `deny`, then all connections or requests are denied and you must enable them explicitly. Refer to [`default_policy`](/consul/docs/agent/config/config-files#acl_default_policy) for details.
-
-## Create an intention
-
-You can create and manage intentions one at a time using the Consul API, CLI, or UI. You can specify one destination or multiple destinations in a single intention.
-
-### API
-
-Send a `PUT` request to the `/connect/intentions/exact` HTTP API endpoint and specify the following query parameters:
-
-- `source`: Service sending the request
-- `destination`: Service responding to the request
-- `ns`: Namespace of the destination service
-
-For L4 intentions, you must also specify the intention action in the request payload.
-
-The following example creates an intention that allows `web` to send request to `db`:
-
-```shell-session
-$ curl --request PUT \
---data ' { "Action": "allow" } ' \
-http://localhost:8500/v1/connect/intentions/exact\?source\=web\&destination\=db
-```
-
-Refer to the `/connect/intentions/exact` [HTTP API endpoint documentation](/consul/api-docs/connect/intentions) for additional information about request payload parameters.
-
-For L7 intentions, specify the `Permissions` in the request payload to configure attributes for dynamically enforcing intentions. In the following example payload, Consul allows HTTP GET requests if the request body is empty:
-
-
-
-```json
-{
- "Permissions": [
- {
- "Action": "allow",
- "HTTP": {
- "Methods": ["GET"],
- "Header": [
- {
- "Name": "Content-Length",
- "Exact": "0"
- }
- ]
- }
- }
- ]
-}
-
-```
-
-
-
-The `Permissions` object specifies a list of permissions for L7 traffic sources. The list contains one or more actions and a set of match criteria for each action. Refer to the [`Sources[].Permissions[]` parameter](/consul/docs/connect/config-entries/service-intentions#sources-permissions) in the service intentions configuration entry reference for configuration details.
-
-To apply the intention, call the endpoint and pass the configuration file containing the attributes to the endpoint:
-
-```shell-session
-$ curl --request PUT \
---data @payload.json \
-http://localhost:8500/v1/connect/intentions/exact\?source\=svc1\&destination\=svc2
-```
-### CLI
-
-Use the `consul intention create` command according to the following syntax to create a new intention:
-
-```shell-session
-$ consul intention create -
-```
-
-The following example creates an intention that allows `web` service instances to connect to `db` service instances:
-
-```shell-session
-$ consul intention create -allow web db
-```
-
-You can use the asterisk (`*`) wildcard to specify multiple destination services. Refer to [Precedence and match order](/consul/docs/connect/intentions/create-manage-intentions#precedence-and-match-order) for additional information.
-
-### Consul UI
-
-1. Log into the Consul UI and choose **Services** from the sidebar menu.
-1. Click on a service and then click the **Intentions** tab.
-1. Click **Create** and choose the source service from the drop-down menu.
-1. You can add an optional description.
-1. Choose one of the following options:
- 1. **Allow**: Allows the source service to send requests to the destination.
- 1. **Deny**: Prevents the source service from sending requests to the destination.
- 1. **Application Aware**: Enables you to specify L7 criteria for dynamically enforcing intentions. Refer to [Configure application aware settings](#configure-application-aware-settings) for additional information.
-1. Click **Save**.
-
-Repeat the procedure as necessary to create additional intentions.
-
-#### Configure application aware settings
-
-You can use the Consul UI to configure L7 permissions.
-
-1. Click **Add permission** to open the permission editor.
-1. Enable the **Allow** or **Deny** option.
-1. You can specify a path, request method, and request headers to match. All criteria must be satisfied for Consul to enforce the permission. Refer to the [`Sources[].Permissions[]` parameter](/consul/docs/connect/config-entries/service-intentions#sources-permissions) in the service intentions configuration entry reference for information about the available configuration fields.
-1. Click **Save**.
-
-Repeat the procedure as necessary to create additional permissions.
-
-## Create multiple intentions
-
-You can create a service intentions configuration entry to specify default intentions for your service mesh. You can specify default settings for L4 or L7 application-aware traffic.
-
-### Define a service intention configuration entry
-
-Configure the following fields:
-
-
-
-
-
-- [`Kind`](/consul/docs/connect/config-entries/service-intentions#kind): Declares the type of configuration entry. Must be set to `service-intentions`.
-- [`Name`](/consul/docs/connect/config-entries/service-intentions#kind): Specifies the name of the destination service for intentions defined in the configuration entry. You can use a wildcard character (*) to set L4 intentions for all services that are not protected by specific intentions. Wildcards are not supported for L7 intentions.
-- [`Sources`](/consul/docs/connect/config-entries/service-intentions#sources): Specifies an unordered list of all intention sources and the authorizations granted to those sources. Consul stores and evaluates the list in reverse order sorted by intention precedence.
-- [`Sources.Action`](/consul/docs/connect/config-entries/service-intentions#sources-action) or [`Sources.Permissions`](/consul/docs/connect/config-entries/service-intentions#sources-permissions): For L4 intentions, set the `Action` field to "allow" or "deny" so that Consul can enforce intentions that match the source service. For L7 intentions, configure the `Permissions` settings, which define a set of application-aware attributes for dynamically matching incoming requests. The `Actions` and `Permissions` settings are mutually exclusive.
-
-
-
-
-
-- [`apiVersion`](/consul/docs/connect/config-entries/service-intentions#apiversion): Specifies the Consul API version. Must be set to `consul.hashicorp.com/v1alpha1`.
-- [`kind`](/consul/docs/connect/config-entries/service-intentions#kind): Declares the type of configuration entry. Must be set to `ServiceIntentions`.
-- [`spec.destination.name`](/consul/docs/connect/config-entries/service-intentions#spec-destination-name): Specifies the name of the destination service for intentions defined in the configuration entry. You can use a wildcard character (*) to set L4 intentions for all services that are not protected by specific intentions. Wildcards are not supported for L7 intentions.
-- [`spec.sources`](/consul/docs/connect/config-entries/service-intentions#spec-sources): Specifies an unordered list of all intention sources and the authorizations granted to those sources. Consul stores and evaluates the list in reverse order sorted by intention precedence.
-- [`spec.sources.action`](/consul/docs/connect/config-entries/service-intentions#spec-sources-action) or [`spec.sources.permissions`](/consul/docs/connect/config-entries/service-intentions#spec-sources-permissions): For L4 intentions, set the `action` field to "allow" or "deny" so that Consul can enforce intentions that match the source service. For L7 intentions, configure the `permissions` settings, which define a set of application-aware attributes for dynamically matching incoming requests. The `actions` and `permissions` settings are mutually exclusive.
-
-
-
-
-
-Refer to the [service intentions configuration entry](/consul/docs/connect/config-entries/service-intentions) reference documentation for details about all configuration options.
-
-Refer to the [example service intentions configurations](/consul/docs/connect/config-entries/service-intentions#examples) for additional guidance.
-
-#### Interaction with other configuration entries
-
-L7 intentions defined in a configuration entry are restricted to destination services
-configured with an HTTP-based protocol as defined in a corresponding
-[service defaults configuration entry](/consul/docs/connect/config-entries/service-defaults)
-or globally in a [proxy defaults configuration entry](/consul/docs/connect/config-entries/proxy-defaults).
-
-### Apply the service intentions configuration entry
-
-You can apply the configuration entry using the [`consul config` command](/consul/commands/config) or by calling the [`/config` API endpoint](/consul/api-docs/config). In Kubernetes environments, apply the `ServiceIntentions` custom resource definitions (CRD) to implement and manage Consul configuration entries.
-
-Refer to the following topics for details about applying configuration entries:
-
-- [How to Use Configuration Entries](/consul/docs/agent/config-entries)
-- [Custom Resource Definitions for Consul on Kubernetes](/consul/docs/k8s/crds)
diff --git a/website/content/docs/connect/intentions/index.mdx b/website/content/docs/connect/intentions/index.mdx
deleted file mode 100644
index 8d6364638a0a..000000000000
--- a/website/content/docs/connect/intentions/index.mdx
+++ /dev/null
@@ -1,95 +0,0 @@
----
-layout: docs
-page_title: Service mesh intentions overview
-description: >-
- Intentions are access controls that allow or deny incoming requests to services in the mesh.
----
-
-# Service intentions overview
-
-This topic provides overview information about Consul intentions, which are mechanisms that control traffic communication between services in the Consul service mesh.
-
-
-
-## Intention types
-
-Intentions control traffic communication between services at the network layer, also called _L4_ traffic, or the application layer, also called _L7 traffic_. The protocol that the destination service uses to send and receive traffic determines the type of authorization the intention can enforce.
-
-### L4 traffic intentions
-
-If the destination service uses TCP or any non-HTTP-based protocol, then intentions can control traffic based on identities encoded in mTLS certificates. Refer to [Mutual transport layer security (mTLS)](/consul/docs/connect/connect-internals#mutual-transport-layer-security-mtls) for additional information.
-
-This implementation allows broad all-or-nothing access control between pairs of services. The only requirement is that the service is aware of the TLS handshake that wraps the opaque TCP connection.
-
-### L7 traffic intentions
-
-If the destination service uses an HTTP-based protocol, then intentions can enforce access based on application-aware request attributes, in addition to identity-based enforcement, to control traffic between services. Refer to [Service intentions configuration reference](/consul/docs/connect/config-entries/service-intentions#permissions) for additional information.
-
-## Workflow
-
-You can manually create intentions from the Consul UI, API, or CLI. You can also enable Consul to dynamically create them by defining traffic routes in service intention configuration entries. Refer to [Create and manage intentions](/consul/docs/connect/intentions/create-manage-intentions) for details.
-
-### Enforcement
-
-The [proxy](/consul/docs/connect/proxies) or [natively-integrated
-application](/consul/docs/connect/native) enforces intentions on inbound connections or requests. Only one intention can control authorization between a pair of services at any single point in time.
-
-L4 intentions mediate the ability to establish new connections. Modifying an intention does not have an effect on existing connections. As a result, changing a connection from `allow` to `deny` does not sever the connection.
-
-L7 intentions mediate the ability to issue new requests. When an intention is modified, requests received after the modification use the latest intention rules to enforce access. Changing a connection from `allow` to `deny` does not sever the connection, but doing so blocks new requests from being processed.
-
-When using L7 intentions, we recommend that you review and update the [Mesh request normalization configuration](/consul/docs/connect/security#request-normalization-and-configured) to avoid unintended match rule circumvention. More details are available in the [Mesh configuration entry reference](/consul/docs/connect/config-entries/mesh#request-normalization).
-
-When you use L7 intentions with header matching and it is possible for a header to contain multiple values, we recommend using `contains` or `regex` instead of `exact`, `prefix`, or `suffix`. For more information, refer to the [service intentions configuration entry reference](/consul/docs/connect/config-entries/service-intentions#spec-sources-permissions-http-header).
-
-### Caching
-
-The intentions for services registered with a Consul agent are cached locally on the agent. Supported proxies also cache intention data in their own configurations so that they can authorize inbound connections or requests without relying on the Consul agent. All actions in the data path of connections take place within the proxy.
-
-### Updates
-
-Consul propagates updates to intentions almost instantly as a result of the continuous blocking query the agent uses. A _blocking query_ is a Consul API feature that uses long polling to wait for potential changes. Refer to [Blocking Queries](/consul/api-docs/features/blocking) for additional information. Proxies also use blocking queries to quickly update their local configurations.
-
-Because all intention data is cached locally, authorizations for inbound connection persist, even if the agents are completely severed from the Consul servers or if the proxies are completely severed from their local Consul agent. If the connection is severed, Consul automatically applies changes to intentions when connectivity is restored.
-
-### Intention maintenance
-
-Services should periodically call the [intention match API](/consul/api-docs/connect/intentions#list-matching-intentions) to retrieve all relevant intentions for the target destination. After verifying the TLS client certificate, the cached intentions for each incoming connection or request determine if it should be accepted or rejected.
-
-## Precedence and match order
-
-Consul processes criteria defined in the service intention configuration entry to match incoming requests. When Consul finds a match, it applies the corresponding action specified in the configuration entry. The match criteria may include specific HTTP headers, request methods, or other attributes. Additionally, you can use regular expressions to programmatically match attributes. Refer to [Service intention configuration entry reference](/consul/docs/connect/config-entries/service-intentions) for details.
-
-Consul orders the matches based on the following factors:
-
-- Specificity: Incoming requests that match attributes directly have the highest precedence. For example, intentions that are configured to deny traffic from services that send `POST` requests take precedence over intentions that allow traffic from methods configured with the wildcard value `*`.
-- Authorization: Consul enforces `deny` over `allow` if match criteria are weighted equally.
-
-The following table shows match precedence in descending order:
-
-| Precedence | Source Namespace | Source Name | Destination Namespace | Destination Name |
-| -----------| ---------------- | ------------| --------------------- | ---------------- |
-| 9 | Exact | Exact | Exact | Exact |
-| 8 | Exact | `*` | Exact | Exact |
-| 7 | `*` | `*` | Exact | Exact |
-| 6 | Exact | Exact | Exact | `*` |
-| 5 | Exact | `*` | Exact | `*` |
-| 4 | `*` | `*` | Exact | `*` |
-| 3 | Exact | Exact | `*` | `*` |
-| 2 | Exact | `*` | `*` | `*` |
-| 1 | `*` | `*` | `*` | `*` |
-
-Consul prints the precedence value to the service intentions configuration entry after it processes the matching criteria. The value is read-only. Refer to
-[`Precedence`](/consul/docs/connect/config-entries/service-intentions#precedence) for additional information.
-
-Namespaces are an Enterprise feature. In Consul CE, the only allowable value for either namespace field is `"default"`. Other rows in the table are not applicable.
-
-The [intention match API](/consul/api-docs/connect/intentions#list-matching-intentions)
-should be periodically called to retrieve all relevant intentions for the
-target destination. After verifying the TLS client certificate, the cached
-intentions should be consulted for each incoming connection/request to
-determine if it should be accepted or rejected.
-
-The default intention behavior is defined by the [`default_policy`](/consul/docs/agent/config/config-files#acl_default_policy) configuration.
-If the configuration is set `allow`, then all service-to-service connections in the mesh will be allowed by default.
-If it is set to `deny`, then all connections or requests will be denied by default.
diff --git a/website/content/docs/connect/intentions/jwt-authorization.mdx b/website/content/docs/connect/intentions/jwt-authorization.mdx
deleted file mode 100644
index 1c1c0f994e89..000000000000
--- a/website/content/docs/connect/intentions/jwt-authorization.mdx
+++ /dev/null
@@ -1,105 +0,0 @@
----
-page_title: JWT authorization overview
-description: |-
- Consul can use service mesh proxies to check and validate JSON Web Tokens (JWT) to enable additional identify-based access security for both human and machine users. Learn how to configure a JWT provider configuration entry and a service intentions configuration entry to authorize requests.
----
-
-# Use JWT authorization with service intentions
-
-JSON Web Tokens (JWT) are a method for identity-based access to services for both humans and machines. The [JWT provider configuration entry](/consul/docs/connect/config-entries/jwt-provider) enables you to define JWTs as part of a JSON Web Key Set (JWKS), which contains the information necessary for Consul to validate access and configure behavior for requests that include JWTs.
-
-By specifying a JSON Web Key Set (JWKS) in the configuration entry and referencing the key set in a service intention, Consul can enforce service intentions based on the presence of a JWT. This security configuration is not related to the [JSON Web Token Auth Method](/consul/docs/security/acl/auth-methods/jwt), which associates JWTs with the Consul ACLs instead of service intentions.
-
-## Workflow
-
-The process to configure your network to enforce service intentions based on JSON web tokens consists of the following steps:
-
-1. **Create a JWT provider configuration entry**. This configuration entry defines rules and behaviors for verifying tokens. These configurations apply to admin partitions in Consul Enterprise, which is functionally equivalent to a datacenter in Consul CE. Then, write the `jwt-provider` configuration entry to Consul. The ACL policy requirement to read and modify this configuration entry is `mesh:write`.
-
-1. **Create or update a service intentions configuration entry to reference the JWT provider**. This configuration invokes the name of the `jwt-provider` configuration entry you created, which causes the Envoy proxy to verify the token and the permissions it authorizes before the incoming request is accepted. Then, write the `service-intentions` configuration entry that references the JWT to Consul. The ACL policy requirement to read and modify this configuration entry is `mesh:write`.
-
-### Wildcards and intention defaults
-
-Because intentions without tokens are authorized when they arrive at the destination proxy, a [common pattern for the service-intentions configuration entry](/consul/docs/connect/config-entries/service-intentions#l4-intentions-for-all-destinations) sets the entry’s `Name` field as a wildcard, `*`. This pattern enables you to apply incoming requests from specific services to every service in the datacenter.
-
-When configuring your deployment to enforce service intentions with JSON Web Tokens, it is possible for multiple tokens with different permissions to apply to a single service’s incoming request based on attributes such as HTTP path or the request method. Because the `service-intentions` configuration entry applies the intention that most closely matches the request, using the `Name` wildcard with specific JWT authorization configurations can lead to unintended results.
-
-When you set the `JWT{}.Providers` field in a service intentions configuration entry to the wildcard `*`, you can configure default behavior for all services that present a token that matches an existing JWT provider configuration entry. In this configuration, services that have a valid token but do not have a more specific matching intention default to the behavior defined in the wildcard intention.
-
-## Requirements
-
-* **Enable ACLs**. Verify that ACLs are enabled and that the default_policy is set to deny.
-
-## Usage
-
-To configure Envoy proxies in the service mesh to validate JWTs before forwarding requests to servers, complete the following steps:
-
-### Create a JWT provider configuration entry
-
-The `jwt-provider` configuration requires the following fields:
-
-- `Kind`: This field must be set to `"jwt-provider"`
-- `Name`: We recommend naming the configuration file after the JWT provider used in the configuration.
-- `Issuer`: This field must match the token's `iss` claim
-
-You must also specify a JSON Web Key Set in the `JSONWebKeySet` field. You can specify the JWKS as one of the following:
-
-- A local string
-- A path to a local file
-- A remote location specified with a URI
-
-A JWKS can be made available locally or remotely, but not both. In addition, a local JWKS must be specified as either a string or a path to the file containing the token.
-
-You can also specify where the JWT is located, a retry policy, and text to append to the header when forwarding the request after token validation.
-
-The following example configures Consul to fetch a JSON Web Token issued by Okta. Consul fetches the token from the URI and keeps it in its cache for 30 minutes before the token expires. After validation, the token is forwarded to the backend with `user-token` appended to the HTTP header.
-
-```hcl
-Kind = "jwt-provider"
-Name = "okta"
-
-Issuer = "okta"
-
-JSONWebKeySet = {
- Remote = {
- URI = "https://.okta.com/oauth2/default/v1/keys"
- CacheDuration = "30m"
- }
-}
-
-Forwarding = {
- HeaderName = "user-token"
-}
-```
-
-Refer to [JWT provider configuration entry](/consul/docs/connect/config-entries/jwt-provider) for more information about the fields you can configure.
-
-To write the configuration entry to Consul, use the [`consul config write` command](/consul/commands/config/write):
-
-```shell-session
-$ consul config write okta-provider.hcl
-```
-
-### Update service intentions
-
-After you create the JWT provider entry, you can update your service intentions so that proxies validate the token before authorizing a request. The following example includes the minimum required configuration to enable JWT authorization with service intentions:
-
-```hcl
-Kind = "service-intentions"
-Name = "web"
-JWT = {
- Providers = [
- {
- Name = "okta"
- }
- ]
-}
-```
-
-You can include additional configuration information to require the token to match specific claims. You can also configure the `JWT` field to apply only to requests that come from certain HTTP paths. Refer to [JWT validations with intentions](/consul/docs/connect/config-entries/service-intentions#jwt-validations-with-intentions) for an example configuration.
-
-After you update the service intention, write the configuration to Consul so that it takes effect:
-
-```shell-session
-$ consul config write web-intention.hcl
-```
diff --git a/website/content/docs/connect/intentions/legacy.mdx b/website/content/docs/connect/intentions/legacy.mdx
deleted file mode 100644
index 9151b1e8ca62..000000000000
--- a/website/content/docs/connect/intentions/legacy.mdx
+++ /dev/null
@@ -1,188 +0,0 @@
----
-layout: docs
-page_title: Intentions (Legacy Mode)
-description: >-
- Intentions define service communication permissions in the service mesh. As of version 1.9, Consul uses a new system for creating and managing intentions. Learn how intentions worked in earlier versions of Consul with this legacy documentation.
----
-
-# Intentions in Legacy Mode
-
-~> **1.8.x and earlier:** This document only applies in Consul versions 1.8.x
-and before. If you are using version 1.9.0 or later, refer to the [current intentions documentation](/consul/docs/connect/intentions).
-
-Intentions define access control for service-to-service connections in the service mesh. Intentions can be
-managed via the API, CLI, or UI.
-
-Intentions are enforced by the [proxy](/consul/docs/connect/proxies)
-or [natively integrated application](/consul/docs/connect/native) on
-inbound connections. After verifying the TLS client certificate, the
-[authorize API endpoint](/consul/api-docs/agent/connect#authorize) is called which verifies the connection
-is allowed by testing the intentions. If authorize returns false the
-connection must be terminated.
-
-The default intention behavior is defined by the default [ACL
-policy](/consul/docs/agent/config/config-files#acl_default_policy). If the default ACL policy is
-"allow all", then all service-to-service connections in the mesh are allowed by default. If the
-default ACL policy is "deny all", then all service-to-service connections are denied by
-default.
-
-## Intention Basics
-
-Intentions can be managed via the [API](/consul/api-docs/connect/intentions),
-[CLI](/consul/commands/intention), or UI. Please see the respective documentation for
-each for full details on options, flags, etc. Below is an example of a basic
-intention to show the basic attributes of an intention. The full data model of
-an intention can be found in the [API
-documentation](/consul/api-docs/connect/intentions).
-
-```shell-session
-$ consul intention create -deny web db
-Created: web => db (deny)
-```
-
-The intention above is a deny intention with a source of "web" and
-destination of "db". This says that connections from web to db are not
-allowed and the connection will be rejected.
-
-When an intention is modified, existing connections will not be affected.
-This means that changing a connection from "allow" to "deny" today
-_will not_ kill the connection. Addressing this shortcoming is on
-the near term roadmap for Consul.
-
-### Wildcard Intentions
-
-An intention source or destination may also be the special wildcard
-value `*`. This matches _any_ value and is used as a catch-all. Example:
-
-```shell-session
-$ consul intention create -deny web '*'
-Created: web => * (deny)
-```
-
-This example says that the "web" service cannot connect to _any_ service.
-
-### Metadata
-
-Arbitrary string key/value data may be associated with intentions. This
-is unused by Consul but can be used by external systems or for visibility
-in the UI.
-
-```shell-session
-$ consul intention create \
- -deny \
- -meta description='Hello there' \
- web db
-...
-
-$ consul intention get web db
-Source: web
-Destination: db
-Action: deny
-ID: 31449e02-c787-f7f4-aa92-72b5d9b0d9ec
-Meta[description]: Hello there
-Created At: Friday, 25-May-18 02:07:51 CEST
-```
-
-## Precedence and Match Order
-
-Intentions are matched in an implicit order based on specificity, preferring
-deny over allow. Specificity is determined by whether a value is an exact
-specified value or is the wildcard value `*`.
-The full precedence table is shown below and is evaluated
-top to bottom, with larger numbers being evaluated first.
-
-| Source Namespace | Source Name | Destination Namespace | Destination Name | Precedence |
-| ---------------- | ----------- | --------------------- | ---------------- | ---------- |
-| Exact | Exact | Exact | Exact | 9 |
-| Exact | `*` | Exact | Exact | 8 |
-| `*` | `*` | Exact | Exact | 7 |
-| Exact | Exact | Exact | `*` | 6 |
-| Exact | `*` | Exact | `*` | 5 |
-| `*` | `*` | Exact | `*` | 4 |
-| Exact | Exact | `*` | `*` | 3 |
-| Exact | `*` | `*` | `*` | 2 |
-| `*` | `*` | `*` | `*` | 1 |
-
-The precedence value can be read from the [API](/consul/api-docs/connect/intentions)
-after an intention is created.
-Precedence cannot be manually overridden today. This is a feature that will
-be added in a later version of Consul.
-
-In the case the two precedence values match, Consul will evaluate
-intentions based on lexicographical ordering of the destination then
-source name. In practice, this is a moot point since authorizing a connection
-has an exact source and destination value so its impossible for two
-valid non-wildcard intentions to match.
-
-The numbers in the table above are not stable. Their ordering will remain
-fixed but the actual number values may change in the future.
-
--> **Consul Enterprise** - Namespaces are an Enterprise feature. In Consul CE, any of the rows in
-the table with a `*` for either the source namespace or destination namespace are not applicable.
-
-## Intention Management Permissions
-
-Intention management can be protected by [ACLs](/consul/docs/security/acl).
-Permissions for intentions are _destination-oriented_, meaning the ACLs
-for managing intentions are looked up based on the destination value
-of the intention, not the source.
-
-Intention permissions are by default implicitly granted at `read` level
-when granting `service:read` or `service:write`. This is because a
-service registered that wants to use service mesh needs `intentions:read`
-for its own service name in order to know whether or not to authorize
-connections. The following ACL policy will implicitly grant `intentions:read`
-(note _read_) for service `web`.
-
-```hcl
-service "web" {
- policy = "write"
-}
-```
-
-It is possible to explicitly specify intention permissions. For example,
-the following policy will allow a service to be discovered without granting
-access to read intentions for it.
-
-```hcl
-service "web" {
- policy = "read"
- intentions = "deny"
-}
-```
-
-Note that `intentions:read` is required for a token that a mesh-enabled
-service uses to register itself or its proxy. If the token used does not
-have `intentions:read` then the agent will be unable to resolve intentions
-for the service and so will not be able to authorize any incoming connections.
-
-~> **Security Note:** Explicitly allowing `intentions:write` on the token you
-provide to a service instance at registration time opens up a significant
-additional vulnerability. Although you may trust the service _team_ to define
-which inbound connections they accept, using a combined token for registration
-allows a compromised instance to to redefine the intentions which allows many
-additional attack vectors and may be hard to detect. We strongly recommend only
-delegating `intentions:write` using tokens that are used by operations teams or
-orchestrators rather than spread via application config, or only manage
-intentions with management tokens.
-
-## Performance and Intention Updates
-
-The intentions for services registered with a Consul agent are cached
-locally on that agent. They are then updated via a background blocking query
-against the Consul servers.
-
-Service mesh connection attempts require only local agent
-communication for authorization and generally only impose microseconds
-of latency to the connection. All actions in the data path of connections
-require only local data to ensure minimal performance overhead.
-
-Updates to intentions are propagated nearly instantly to agents since agents
-maintain a continuous blocking query in the background for intention updates
-for registered services.
-
-Because all the intention data is cached locally, the agents can fail static.
-Even if the agents are severed completely from the Consul servers, inbound
-connection authorization continues to work for a configured amount of time.
-Changes to intentions will not be picked up until the partition heals, but
-will then automatically take effect when connectivity is restored.
diff --git a/website/content/docs/connect/k8s/crds.mdx b/website/content/docs/connect/k8s/crds.mdx
new file mode 100644
index 000000000000..73aaa00674d6
--- /dev/null
+++ b/website/content/docs/connect/k8s/crds.mdx
@@ -0,0 +1,389 @@
+---
+layout: docs
+page_title: Custom Resource Definitions (CRDs) for Consul on Kubernetes
+description: >-
+ Configuration entries define service mesh behaviors in order to secure and manage traffic. Learn about Consul's different config entry kinds and get links to configuration reference pages.
+---
+
+# Custom Resource Definitions (CRDs) for Consul on Kubernetes
+
+This topic describes how to manage Consul [configuration
+entries](/consul/docs/fundamentals/config-entry) with Kubernetes Custom
+Resources. Configuration entries provide cluster-wide defaults for the service
+mesh.
+
+## Supported configuration entries
+
+You may specify the following values in the `kind` field:
+
+- [`Mesh`](/consul/docs/reference/config-entry/mesh)
+- [`ExportedServices`](/consul/docs/reference/config-entry/exported-services)
+- [`PeeringAcceptor`](/consul/docs/east-west/cluster-peering/tech-specs/k8s#crd-specifications)
+- [`PeeringDialer`](/consul/docs/east-west/cluster-peering/tech-specs/k8s#crd-specifications)
+- [`ProxyDefaults`](/consul/docs/reference/config-entry/proxy-defaults)
+- [`Registration`](/consul/docs/reference/config-entry/registration)
+- [`SamenessGroup`](/consul/docs/reference/config-entry/sameness-group)
+- [`ServiceDefaults`](/consul/docs/reference/config-entry/service-defaults)
+- [`ServiceSplitter`](/consul/docs/reference/config-entry/service-splitter)
+- [`ServiceRouter`](/consul/docs/reference/config-entry/service-router)
+- [`ServiceResolver`](/consul/docs/reference/config-entry/service-resolver)
+- [`ServiceIntentions`](/consul/docs/reference/config-entry/service-intentions)
+- [`IngressGateway`](/consul/docs/reference/config-entry/ingress-gateway)
+- [`TerminatingGateway`](/consul/docs/reference/config-entry/terminating-gateway)
+
+## Installation
+
+Verify that you have installed the minimum version of the Helm chart (`0.28.0`).
+
+```shell-session
+$ helm search repo hashicorp/consul
+NAME CHART VERSION APP VERSION DESCRIPTION
+hashicorp/consul 0.28.0 1.9.1 Official HashiCorp Consul Chart
+```
+
+Update your Helm repository cache if necessary.
+
+```shell-session
+$ helm repo update
+Hang tight while we grab the latest from your chart repositories...
+...Successfully got an update from the "hashicorp" chart repository
+Update Complete. ⎈Happy Helming!⎈
+```
+
+Refer to [Install with Helm Chart](/consul/docs/deploy/server/k8s/helm) for
+further installation instructions.
+
+**Note**: Configuration entries require `connectInject` to be enabled, which is
+a default behavior in the official Helm Chart. If you disabled this setting, you
+must re-enable it to use CRDs.
+
+## Usage
+
+Once installed, use `kubectl` to create and manage Consul's configuration entries.
+
+### Create
+
+Create configuration entries with `kubectl apply`.
+
+```shell-session
+$ cat <<EOF | kubectl apply --filename -
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ServiceDefaults
+metadata:
+  name: foo
+spec:
+  protocol: http
+EOF
+servicedefaults.consul.hashicorp.com/foo created
+```
+
+### Update
+
+Update configuration entries with `kubectl edit`, for example to change the protocol of the service.
+
+```shell-session
+$ kubectl edit servicedefaults foo
+# change spec.protocol to: tcp
+servicedefaults.consul.hashicorp.com/foo edited
+```
+
+You can then use `kubectl get` to ensure the change was synced to Consul.
+
+```shell-session
+$ kubectl get servicedefaults foo
+NAME SYNCED
+foo True
+```
+
+### Delete
+
+Use `kubectl delete [kind] [name]` to delete the configuration entry.
+
+```shell-session
+$ kubectl delete servicedefaults foo
+servicedefaults.consul.hashicorp.com "foo" deleted
+```
+
+Use `kubectl get` to ensure the configuration entry was deleted.
+
+```shell-session
+$ kubectl get servicedefaults foo
+Error from server (NotFound): servicedefaults.consul.hashicorp.com "foo" not found
+```
+
+#### Delete hanging
+
+If running `kubectl delete` hangs without exiting, there may be a dependent
+configuration entry registered with Consul that prevents the target
+configuration entry from being deleted. For example, if you set the protocol of
+your service to `http` in `ServiceDefaults` and then create a `ServiceSplitter`,
+you are not able to delete `ServiceDefaults`. This is because by deleting the
+`ServiceDefaults` config, you are setting the protocol back to the default,
+which is `tcp`. Because `ServiceSplitter` requires that the service has an
+`http` protocol, Consul does not allow you to delete the `ServiceDefaults` since
+that would put Consul into a broken state.
+
+In order to delete the `ServiceDefaults` config, you would need to first delete
+the `ServiceSplitter`.
+
+## Kubernetes namespaces
+
+### Consul CE ((#consul_oss))
+
+Consul Community Edition (Consul CE) ignores Kubernetes namespaces and registers all services into the same
+global Consul registry based on their names. For example, service `web` in Kubernetes namespace
+`web-ns` and service `admin` in Kubernetes namespace `admin-ns` are registered into
+Consul as `web` and `admin` with the Kubernetes source namespace ignored.
+
+When creating custom resources to configure these services, the namespace of the
+custom resource is also ignored. For example, you can create a `ServiceDefaults`
+custom resource for service `web` in the Kubernetes namespace `admin-ns` even though
+the `web` service is actually running in the `web-ns` namespace (although this is not recommended):
+
+```yaml
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ServiceDefaults
+metadata:
+ name: web
+ namespace: admin-ns
+spec:
+ protocol: http
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: web
+ namespace: web-ns
+spec: ...
+```
+
+~> **Note:** If you create two custom resources with identical `kind` and `name` values in different Kubernetes namespaces, the last one you create is not able to sync.
+
+#### ServiceIntentions special case
+
+`ServiceIntentions` are different from the other custom resources because the
+name of the resource doesn't matter. For other resources, the name of the resource
+determines which service it configures. For example, this resource configures
+the service `web`:
+
+
+
+```yaml
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ServiceDefaults
+metadata:
+ name: web
+spec:
+ protocol: http
+```
+
+
+
+For `ServiceIntentions`, because we need to support the ability to create
+wildcard intentions (e.g. `foo => * (allow)` meaning that `foo` can talk to **any** service),
+and because `*` is not a valid Kubernetes resource name, we instead use the field `spec.destination.name`
+to configure the destination service for the intention:
+
+
+
+```yaml
+# foo => * (allow)
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ServiceIntentions
+metadata:
+ name: name-does-not-matter
+spec:
+ destination:
+ name: '*'
+ sources:
+ - name: foo
+ action: allow
+---
+# foo => web (allow)
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ServiceIntentions
+metadata:
+ name: name-does-not-matter
+spec:
+ destination:
+ name: web
+ sources:
+ - name: foo
+ action: allow
+```
+
+
+
+If two `ServiceIntentions` resources set the same `spec.destination.name`, the
+last one created is not synced.
+
+### Consul Enterprise
+
+Consul Enterprise supports multiple configurations for how Kubernetes namespaces are mapped
+to Consul namespaces. The Consul namespace that the custom resource is registered
+into depends on the configuration being used but in general, you should create your
+custom resources in the same Kubernetes namespace as the service they configure.
+
+The details on each configuration are:
+
+1. **Mirroring** - The Kubernetes namespace is mirrored into Consul. For example, the
+ service `web` in Kubernetes namespace `web-ns` is registered as service `web`
+ in the Consul namespace `web-ns`. In the same vein, a `ServiceDefaults` custom resource with
+ name `web` in Kubernetes namespace `web-ns` configures that same service.
+
+ This is configured with [`connectInject.consulNamespaces`](/consul/docs/reference/k8s/helm#v-connectinject-consulnamespaces):
+
+
+
+ ```yaml
+ global:
+ name: consul
+ enableConsulNamespaces: true
+ image: hashicorp/consul-enterprise:<version>-ent
+ connectInject:
+ consulNamespaces:
+ mirroringK8S: true
+ ```
+
+
+
+1. **Mirroring with prefix** - The Kubernetes namespace is mirrored into Consul
+ with a prefix added to the Consul namespace. For example, if the prefix is `k8s-` then service `web` in Kubernetes namespace `web-ns` will be registered as service `web`
+ in the Consul namespace `k8s-web-ns`. In the same vein, a `ServiceDefaults` custom resource with
+ name `web` in Kubernetes namespace `web-ns` configures that same service.
+
+ This is configured with [`connectInject.consulNamespaces`](/consul/docs/reference/k8s/helm#v-connectinject-consulnamespaces):
+
+
+
+ ```yaml
+ global:
+ name: consul
+ enableConsulNamespaces: true
+ image: hashicorp/consul-enterprise:<version>-ent
+ connectInject:
+ consulNamespaces:
+ mirroringK8S: true
+ mirroringK8SPrefix: k8s-
+ ```
+
+
+
+1. **Single destination namespace** - The Kubernetes namespace is ignored and all services
+ are registered into the same Consul namespace. For example, if the destination Consul
+ namespace is `my-ns` then service `web` in Kubernetes namespace `web-ns` is registered as service `web` in Consul namespace `my-ns`.
+
+ In this configuration, the Kubernetes namespace of the custom resource is ignored.
+ For example, a `ServiceDefaults` custom resource with the name `web` in Kubernetes
+ namespace `admin-ns` configures the service with name `web` even though that
+ service is running in Kubernetes namespace `web-ns` because the `ServiceDefaults`
+ resource ends up registered into the same Consul namespace `my-ns`.
+
+ This is configured with [`connectInject.consulNamespaces`](/consul/docs/reference/k8s/helm#v-connectinject-consulnamespaces):
+
+
+
+ ```yaml
+ global:
+ name: consul
+ enableConsulNamespaces: true
+ image: hashicorp/consul-enterprise:<version>-ent
+ connectInject:
+ consulNamespaces:
+ consulDestinationNamespace: 'my-ns'
+ ```
+
+
+
+ ~> **Note:** In this configuration, if two custom resources are created in two Kubernetes namespaces with identical `name` and `kind` values, the last one created is not synced.
+
+#### ServiceIntentions Special Case (Enterprise)
+
+`ServiceIntentions` are different from the other custom resources because the
+name of the resource does not matter. For other resources, the name of the resource
+determines which service it configures. For example, this resource configures
+the service `web`:
+
+
+
+```yaml
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ServiceDefaults
+metadata:
+ name: web
+spec:
+ protocol: http
+```
+
+
+
+For `ServiceIntentions`, because we need to support the ability to create
+wildcard intentions (e.g. `foo => * (allow)` meaning that `foo` can talk to any service),
+and because `*` is not a valid Kubernetes resource name, we instead use the field `spec.destination.name`
+to configure the destination service for the intention:
+
+
+
+```yaml
+# foo => * (allow)
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ServiceIntentions
+metadata:
+ name: name-does-not-matter
+spec:
+ destination:
+ name: '*'
+ sources:
+ - name: foo
+ action: allow
+---
+# foo => web (allow)
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ServiceIntentions
+metadata:
+ name: name-does-not-matter
+spec:
+ destination:
+ name: web
+ sources:
+ - name: foo
+ action: allow
+```
+
+
+
+In addition, we support the field `spec.destination.namespace` to configure
+the destination service's Consul namespace. If `spec.destination.namespace`
+is empty, then the Consul namespace used is the same as the other
+config entries as outlined above.
diff --git a/website/content/docs/connect/k8s/index.mdx b/website/content/docs/connect/k8s/index.mdx
new file mode 100644
index 000000000000..43dae42002a2
--- /dev/null
+++ b/website/content/docs/connect/k8s/index.mdx
@@ -0,0 +1,43 @@
+---
+layout: docs
+page_title: Connect Kubernetes services with Consul
+description: >-
+ Consul documentation provides reference material for all features and options available in Consul.
+---
+
+# Connect Kubernetes service mesh with Consul
+
+This page describes the process to deploy sidecar proxies on Kubernetes so that your services can connect to Consul's service mesh.
+
+## Introduction
+
+Consul service mesh is enabled by default when you install Consul on Kubernetes using the Consul Helm chart. Consul also automatically injects sidecars into the pods in your clusters that run Envoy. These sidecar proxies, called Consul dataplanes, are enabled when `connectInject.default` is set to `true` in the Helm chart.
+
+## Workflows
+
+To get started with the Consul service mesh on Kubernetes, [enable and configure the connect injector](/consul/docs/connect/k8s/inject).
+
+If `connectInject.default` is set to `false` or you want to explicitly enable service mesh sidecar proxy injection for a specific deployment, add the `consul.hashicorp.com/connect-inject` annotation to the pod specification template and set it to `true` when connecting services to the mesh.
+
+Additional configuration examples are available to help you configure your workloads:
+
+- [Kubernetes Pods running as a deployment](/consul/docs/connect/k8s/workload#kubernetes-pods-running-as-a-deployment)
+- [Connecting to mesh-enabled Services](/consul/docs/connect/k8s/workload#connecting-to-mesh-enabled-services)
+- [Kubernetes Jobs](/consul/docs/connect/k8s/workload#kubernetes-jobs)
+- [Kubernetes Pods with multiple ports](/consul/docs/connect/k8s/workload#kubernetes-pods-with-multiple-ports)
+
+## Service names
+
+When the service is onboarded, the name registered in Consul is set to the name of the Kubernetes Service associated with the Pod. You can use the [`consul.hashicorp.com/connect-service` annotation](/consul/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service) to specify a custom name for the service, but if ACLs are enabled then the name of the service registered in Consul must match the Pod's `ServiceAccount` name.
+
+## Transparent proxy mode
+
+By default, the Consul service mesh runs in transparent proxy mode. This mode forces inbound and outbound traffic through the sidecar proxy even though the service binds to all interfaces. Transparent proxy infers the location of upstream services using Consul service intentions, and also allows you to use Kubernetes DNS as you normally would for your workloads.
+
+When transparent proxy mode is enabled, all service-to-service traffic is required to use mTLS. When onboarding new services to service mesh, your network may have mixed mTLS and non-mTLS traffic, which can result in broken service-to-service communication. You can temporarily enable permissive mTLS mode during the onboarding process so that existing mesh services can accept traffic from services that are not yet fully onboarded. Permissive mTLS enables sidecar proxies to access both mTLS and non-mTLS traffic. Refer to [Onboard mesh services in transparent proxy mode](/consul/docs/register/service/k8s/transparent-proxy) for additional information.
+
+## Next steps
+
+After you start the sidecar proxies, the rest of Consul's service mesh features are available. You can now use Consul to [manage traffic between services](/consul/docs/manage-traffic/k8s) and [observe service mesh telemetry](/consul/docs/observe/telemetry/k8s).
+
+Your current service mesh is not ready for production environments. To secure north/south access from external sources into the service mesh, [Deploy the Consul API gateway](/consul/docs/north-south/api-gateway). Then, you must secure service-to-service communication with mTLS certificates and service intentions. Refer to [secure the service mesh](/consul/docs/secure-mesh/k8s) for more information.
\ No newline at end of file
diff --git a/website/content/docs/connect/k8s/inject.mdx b/website/content/docs/connect/k8s/inject.mdx
new file mode 100644
index 000000000000..038b4a590dc0
--- /dev/null
+++ b/website/content/docs/connect/k8s/inject.mdx
@@ -0,0 +1,200 @@
+---
+layout: docs
+page_title: Connect Kubernetes services with Consul
+description: >-
+ Consul documentation provides reference material for all features and options available in Consul.
+---
+
+# Custom Consul injection behavior
+
+This page describes the process to enable the Consul injector so that you can use Consul's service mesh features on Kubernetes, and then configure custom injection behavior such as defaults for Consul and Kubernetes namespaces.
+
+## Enable connect injector
+
+The service mesh sidecar proxy is injected via a
+[mutating admission webhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks)
+called the connect injector provided by the
+[consul-k8s project](https://github.com/hashicorp/consul-k8s).
+This enables the automatic pod mutation shown in the usage section above.
+Installation of the mutating admission webhook is automated using the
+[Helm chart](/consul/docs/deploy/server/k8s/helm).
+
+To install the connect injector, enable the connect injection feature using
+[Helm values](/consul/docs/reference/k8s/helm#configuration-values) and
+upgrade the installation using `helm upgrade` for existing installs or
+`helm install` for a fresh install.
+
+```yaml
+connectInject:
+ enabled: true
+```
+
+This will configure the injector to inject when the
+[injection annotation](#consul-hashicorp-com-connect-inject)
+is set to `true`. Other values in the Helm chart can be used to limit the namespaces
+the injector runs in, enable injection by default, and more.
+
+## Verify the injection
+
+To verify the installation, run the
+["Accepting Inbound Connections"](/consul/docs/k8s/connect#accepting-inbound-connections)
+example from the "Usage" section above. After running this example, run
+`kubectl get pod static-server --output yaml`. In the raw YAML output, you should
+see connect injected containers and an annotation
+`consul.hashicorp.com/connect-inject-status` set to `injected`. This
+confirms that injection is working properly.
+
+If you do not see this, then use `kubectl logs` against the injector pod
+and note any errors.
+
+## Controlling Injection with Annotations
+
+By default, the injector will inject only when the
+[injection annotation](#consul-hashicorp-com-connect-inject)
+on the pod (not the deployment) is set to `true`:
+
+```yaml
+annotations:
+ 'consul.hashicorp.com/connect-inject': 'true'
+```
+
+### Injection Defaults
+
+If you wish for the injector to always inject, you can set the default to `true`
+in the Helm chart:
+
+```yaml
+connectInject:
+ enabled: true
+ default: true
+```
+
+You can then exclude specific pods via annotation:
+
+```yaml
+annotations:
+ 'consul.hashicorp.com/connect-inject': 'false'
+```
+
+## Controlling Injection for Namespace
+
+You can control which Kubernetes namespaces are allowed to be injected via
+the `k8sAllowNamespaces` and `k8sDenyNamespaces` keys:
+
+```yaml
+connectInject:
+ enabled: true
+ k8sAllowNamespaces: ['*']
+ k8sDenyNamespaces: []
+```
+
+In the default configuration (shown above), services from all namespaces are allowed
+to be injected. Whether or not they're injected depends on the value of `connectInject.default`
+and the `consul.hashicorp.com/connect-inject` annotation.
+
+If you wish to only enable injection in specific namespaces, you can list only those
+namespaces in the `k8sAllowNamespaces` key. In the configuration below
+only the `my-ns-1` and `my-ns-2` namespaces will be enabled for injection.
+All other namespaces will be ignored, even if the connect inject [annotation](#consul-hashicorp-com-connect-inject)
+is set.
+
+```yaml
+connectInject:
+ enabled: true
+ k8sAllowNamespaces: ['my-ns-1', 'my-ns-2']
+ k8sDenyNamespaces: []
+```
+
+If you wish to enable injection in every namespace _except_ specific namespaces, you can
+use `*` in the allow list to allow all namespaces and then specify the namespaces to exclude in the deny list:
+
+```yaml
+connectInject:
+ enabled: true
+ k8sAllowNamespaces: ['*']
+ k8sDenyNamespaces: ['no-inject-ns-1', 'no-inject-ns-2']
+```
+
+-> **NOTE:** The deny list takes precedence over the allow list. If a namespace
+is listed in both lists, it will **not** be injected.
+
+~> **NOTE:** The `kube-system` and `kube-public` namespaces will never be injected.
+
+### Consul Enterprise Namespaces
+
+Consul Enterprise 1.7+ supports Consul namespaces. When Kubernetes pods are registered
+into Consul, you can control which Consul namespace they are registered into.
+
+There are three options available:
+
+1. **Single Destination Namespace** – Register all Kubernetes pods, regardless of namespace,
+ into the same Consul namespace.
+
+ This can be configured with:
+
+ ```yaml
+ global:
+ enableConsulNamespaces: true
+
+ connectInject:
+ enabled: true
+ consulNamespaces:
+ consulDestinationNamespace: 'my-consul-ns'
+ ```
+
+ -> **NOTE:** If the destination namespace does not exist we will create it.
+
+1. **Mirror Namespaces** - Register each Kubernetes pod into a Consul namespace with the same name as its Kubernetes namespace.
+ For example, pod `foo` in Kubernetes namespace `ns-1` will be synced to the Consul namespace `ns-1`.
+ If a mirrored namespace does not exist in Consul, it will be created.
+
+ This can be configured with:
+
+ ```yaml
+ global:
+ enableConsulNamespaces: true
+
+ connectInject:
+ enabled: true
+ consulNamespaces:
+ mirroringK8S: true
+ ```
+
+1. **Mirror Namespaces With Prefix** - Register each Kubernetes pod into a Consul namespace with the same name as its Kubernetes
+ namespace **with a prefix**.
+ For example, given a prefix `k8s-`, pod `foo` in Kubernetes namespace `ns-1` will be synced to the Consul namespace `k8s-ns-1`.
+
+ This can be configured with:
+
+ ```yaml
+ global:
+ enableConsulNamespaces: true
+
+ connectInject:
+ enabled: true
+ consulNamespaces:
+ mirroringK8S: true
+ mirroringK8SPrefix: 'k8s-'
+ ```
+
+### Consul Enterprise Namespace Upstreams
+
+When [transparent proxy](/consul/docs/connect/transparent-proxy) is enabled and ACLs are disabled,
+the upstreams will be configured automatically across Consul namespaces.
+When ACLs are enabled, you must configure it by specifying an [intention](/consul/docs/secure-mesh/intention),
+allowing services across Consul namespaces to talk to each other.
+
+If you wish to specify an upstream explicitly via the `consul.hashicorp.com/connect-service-upstreams` annotation,
+use the format `[service-name].[namespace]:[port]:[optional datacenter]`:
+
+```yaml
+annotations:
+ 'consul.hashicorp.com/connect-inject': 'true'
+ 'consul.hashicorp.com/connect-service-upstreams': '[service-name].[namespace]:[port]:[optional datacenter]'
+```
+
+See [consul.hashicorp.com/connect-service-upstreams](/consul/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) for more details.
+
+-> **Note:** When you specify upstreams via an upstreams annotation, you will need to use
+`localhost:<port>` with the port from the upstreams annotation instead of KubeDNS to connect to your upstream
+application.
\ No newline at end of file
diff --git a/website/content/docs/connect/k8s/workload.mdx b/website/content/docs/connect/k8s/workload.mdx
new file mode 100644
index 000000000000..2d7a9059ad7e
--- /dev/null
+++ b/website/content/docs/connect/k8s/workload.mdx
@@ -0,0 +1,486 @@
+---
+layout: docs
+page_title: Kubernetes service mesh workload scenarios
+description: >-
+ An injection annotation allows Consul to automatically deploy sidecar proxies on Kubernetes pods, enabling Consul's service mesh for containers running on k8s. Learn how to configure sidecars, enable services with multiple ports (multiport or multi-port Services), change default injection settings.
+---
+
+# Kubernetes service mesh workload scenarios
+
+This page provides example workflows for registering workloads on Kubernetes into Consul's service mesh in different scenarios, including multiport deployments. Each scenario provides an example Kubernetes manifest to demonstrate how to use Consul's service mesh with a specific Kubernetes workload type.
+
+-> **Note:** A Kubernetes Service is required in order to register services on the Consul service mesh. Consul monitors the lifecycle of the Kubernetes Service and its service instances using the service object. In addition, the Kubernetes service is used to register and de-register the service from Consul's catalog.
+
+## Kubernetes Pods running as a deployment
+
+The following example shows a Kubernetes configuration that specifically enables service mesh connections for the `static-server` service. Consul starts and registers a sidecar proxy that listens on port 20000 by default and proxies valid inbound connections to port 8080.
+
+
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ # This name will be the service name in Consul.
+ name: static-server
+spec:
+ selector:
+ app: static-server
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 8080
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: static-server
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: static-server
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: static-server
+ template:
+ metadata:
+ name: static-server
+ labels:
+ app: static-server
+ annotations:
+ 'consul.hashicorp.com/connect-inject': 'true'
+ spec:
+ containers:
+ - name: static-server
+ image: hashicorp/http-echo:latest
+ args:
+ - -text="hello world"
+ - -listen=:8080
+ ports:
+ - containerPort: 8080
+ name: http
+ # If ACLs are enabled, the serviceAccountName must match the Consul service name.
+ serviceAccountName: static-server
+```
+
+
+
+To establish a connection to the upstream Pod using service mesh, a client must dial the upstream workload using a mesh proxy. The client mesh proxy will use Consul service discovery to find all available upstream proxies and their public ports.
+
+## Connecting to mesh-enabled Services
+
+The example Deployment specification below configures a Deployment that is capable
+of establishing connections to our previous example "static-server" service. The
+connection to this static text service happens over an authorized and encrypted
+connection via service mesh.
+
+
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ # This name will be the service name in Consul.
+ name: static-client
+spec:
+ selector:
+ app: static-client
+ ports:
+ - port: 80
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: static-client
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: static-client
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: static-client
+ template:
+ metadata:
+ name: static-client
+ labels:
+ app: static-client
+ annotations:
+ 'consul.hashicorp.com/connect-inject': 'true'
+ spec:
+ containers:
+ - name: static-client
+ image: curlimages/curl:latest
+ # Just spin & wait forever, we'll use `kubectl exec` to demo
+ command: ['/bin/sh', '-c', '--']
+ args: ['while true; do sleep 30; done;']
+ # If ACLs are enabled, the serviceAccountName must match the Consul service name.
+ serviceAccountName: static-client
+```
+
+
+
+By default, when ACLs are disabled or when the ACL default policy is `allow`,
+Consul will automatically configure proxies with all upstreams from the same datacenter.
+When ACLs are enabled with default `deny` policy,
+you must supply an [intention](/consul/docs/secure-mesh/intention) to tell Consul which upstream you need to talk to.
+
+When upstreams are specified explicitly with the
+[`consul.hashicorp.com/connect-service-upstreams` annotation](/consul/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams),
+the injector will also set environment variables `_CONNECT_SERVICE_HOST`
+and `_CONNECT_SERVICE_PORT` in every container in the Pod for every defined
+upstream. This is analogous to the standard Kubernetes service environment variables, but
+point instead to the correct local proxy port to establish connections via
+service mesh.
+
+You cannot reference auto-generated environment variables when the upstream annotation contains a dot. This is because Consul also renders the environment variables to include a dot. For example, Consul renders the variables generated for `static-server.svc:8080` as `STATIC-SERVER.SVC_CONNECT_SERVICE_HOST` and `STATIC-SERVER.SVC_CONNECT_SERVICE_PORT`, which makes the variables unusable.
+You can verify access to the static text server using `kubectl exec`.
+Because transparent proxy is enabled by default,
+use Kubernetes DNS to connect to your desired upstream.
+
+```shell-session
+$ kubectl exec deploy/static-client -- curl --silent http://static-server/
+"hello world"
+```
+
+You can control access to the server using [intentions](/consul/docs/secure-mesh/intention).
+If you use the Consul UI or [CLI](/consul/commands/intention/create) to
+deny communication between
+"static-client" and "static-server", connections are immediately rejected
+without updating either of the running pods. You can then remove this
+intention to allow connections again.
+
+```shell-session
+$ kubectl exec deploy/static-client -- curl --silent http://static-server/
+command terminated with exit code 52
+```
+
+## Kubernetes Jobs
+
+Kubernetes Jobs run pods that only make outbound requests to services on the mesh and successfully terminate when they are complete. In order to register a Kubernetes Job with the mesh, you must provide an integer value for the `consul.hashicorp.com/sidecar-proxy-lifecycle-shutdown-grace-period-seconds` annotation. Then, issue a request to the `http://127.0.0.1:20600/graceful_shutdown` API endpoint so that Kubernetes gracefully shuts down the `consul-dataplane` sidecar after the job is complete.
+
+Below is an example Kubernetes manifest that deploys a job correctly.
+
+
+
+```yaml
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: test-job
+ namespace: default
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: test-job
+ namespace: default
+spec:
+ selector:
+ app: test-job
+ ports:
+ - port: 80
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: test-job
+ namespace: default
+ labels:
+ app: test-job
+spec:
+ template:
+ metadata:
+ annotations:
+ 'consul.hashicorp.com/connect-inject': 'true'
+ 'consul.hashicorp.com/sidecar-proxy-lifecycle-shutdown-grace-period-seconds': '5'
+ labels:
+ app: test-job
+ spec:
+ containers:
+ - name: test-job
+ image: alpine/curl:3.14
+ ports:
+ - containerPort: 80
+ command:
+ - /bin/sh
+ - -c
+ - |
+ echo "Started test job"
+ sleep 10
+ echo "Killing proxy"
+ curl --max-time 2 -s -f -X POST http://127.0.0.1:20600/graceful_shutdown
+ sleep 10
+ echo "Ended test job"
+ serviceAccountName: test-job
+ restartPolicy: Never
+```
+
+
+
+Upon completing the job you should be able to verify that all containers are shut down within the pod.
+
+```shell-session
+$ kubectl get pods
+NAME READY STATUS RESTARTS AGE
+test-job-49st7 0/2 Completed 0 3m55s
+```
+
+```shell-session
+$ kubectl get job
+NAME COMPLETIONS DURATION AGE
+test-job 1/1 30s 4m31s
+```
+
+In addition, based on the logs emitted by the pod you can verify that the proxy was shut down before the Job completed.
+
+```shell-session
+$ kubectl logs test-job-49st7 -c test-job
+Started test job
+Killing proxy
+Ended test job
+```
+
+## Kubernetes Pods with multiple ports
+
+To configure a pod with multiple ports to be a part of the service mesh and receive and send service mesh traffic, you
+will need to add configuration so that a Consul service can be registered per port. This is because services in Consul
+currently support a single port per service instance.
+
+In the following example, suppose we have a pod which exposes 2 ports, `8080` and `9090`, both of which will need to
+receive service mesh traffic.
+
+First, decide on the names for the two Consul services that will correspond to those ports. In this example, the user
+chooses the names `web` for `8080` and `web-admin` for `9090`.
+
+Create two service accounts for `web` and `web-admin`:
+
+
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: web
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: web-admin
+```
+
+
+
+
+Create two Service objects for `web` and `web-admin`:
+
+
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: web
+spec:
+ selector:
+ app: web
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 8080
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: web-admin
+spec:
+ selector:
+ app: web
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 9090
+```
+
+
+
+`web` will target `containerPort` `8080` and select pods labeled `app: web`. `web-admin` will target `containerPort`
+`9090` and will also select the same pods.
+
+~> Kubernetes 1.24+ only
+In Kubernetes 1.24+ you need to [create a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#service-account-token-secrets) for each additional Consul service associated with the pod in order to expose the Kubernetes ServiceAccount token to the Consul dataplane container running under the pod serviceAccount. The Kubernetes secret name must match the ServiceAccount name:
+
+
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: web
+ annotations:
+ kubernetes.io/service-account.name: web
+type: kubernetes.io/service-account-token
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: web-admin
+ annotations:
+ kubernetes.io/service-account.name: web-admin
+type: kubernetes.io/service-account-token
+```
+
+
+
+Create a Deployment with any chosen name, and use the following annotations:
+```yaml
+annotations:
+ 'consul.hashicorp.com/connect-inject': 'true'
+ 'consul.hashicorp.com/transparent-proxy': 'false'
+ 'consul.hashicorp.com/connect-service': 'web,web-admin'
+ 'consul.hashicorp.com/connect-service-port': '8080,9090'
+```
+Note that the ports must be listed in the same order as the service names, i.e. the first service name `web`
+corresponds to the first port, `8080`, and the second service name `web-admin` corresponds to the second port, `9090`.
+
+The service account on the pod spec for the deployment should be set to the first service name `web`:
+```yaml
+serviceAccountName: web
+```
+
+The following deployment example demonstrates the required annotations for the manifest. In addition, the previous YAML manifests can also be combined into a single manifest for easier deployment.
+
+
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: web
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: web
+ template:
+ metadata:
+ name: web
+ labels:
+ app: web
+ annotations:
+ 'consul.hashicorp.com/connect-inject': 'true'
+ 'consul.hashicorp.com/transparent-proxy': 'false'
+ 'consul.hashicorp.com/connect-service': 'web,web-admin'
+ 'consul.hashicorp.com/connect-service-port': '8080,9090'
+ spec:
+ containers:
+ - name: web
+ image: hashicorp/http-echo:latest
+ args:
+ - -text="hello world"
+ - -listen=:8080
+ ports:
+ - containerPort: 8080
+ name: http
+ - name: web-admin
+ image: hashicorp/http-echo:latest
+ args:
+ - -text="hello world from 9090"
+ - -listen=:9090
+ ports:
+ - containerPort: 9090
+ name: http
+ serviceAccountName: web
+```
+
+
+
+After deploying the `web` application, you can test service mesh connections by deploying the `static-client`
+application with the configuration in the [previous section](#connecting-to-mesh-enabled-services) and add the
+`consul.hashicorp.com/connect-service-upstreams: 'web:1234,web-admin:2234'` annotation to the pod template on `static-client`:
+
+
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ # This name will be the service name in Consul.
+ name: static-client
+spec:
+ selector:
+ app: static-client
+ ports:
+ - port: 80
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: static-client
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: static-client
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: static-client
+ template:
+ metadata:
+ name: static-client
+ labels:
+ app: static-client
+ annotations:
+ 'consul.hashicorp.com/connect-inject': 'true'
+ 'consul.hashicorp.com/connect-service-upstreams': 'web:1234,web-admin:2234'
+ spec:
+ containers:
+ - name: static-client
+ image: curlimages/curl:latest
+ # Just spin & wait forever, we'll use `kubectl exec` to demo
+ command: ['/bin/sh', '-c', '--']
+ args: ['while true; do sleep 30; done;']
+ # If ACLs are enabled, the serviceAccountName must match the Consul service name.
+ serviceAccountName: static-client
+```
+
+
+
+If you exec on to a static-client pod, using a command like:
+```shell-session
+$ kubectl exec -it static-client-5bd667fbd6-kk6xs -- /bin/sh
+```
+you can then run:
+```shell-session
+$ curl localhost:1234
+```
+to see the output `hello world` and run:
+```shell-session
+$ curl localhost:2234
+```
+to see the output `hello world from 9090`.
+
+The way this works is that a Consul service instance is being registered per port on the Pod, so there are 2 Consul
+services in this case. An additional Envoy sidecar proxy and `connect-init` init container are also deployed per port in
+the Pod. So the upstream configuration can use the individual service names to reach each port as seen in the example.
+
+### Caveats for Multi-port Pods
+
+- Transparent proxy is not supported for multi-port Pods.
+- Metrics and metrics merging is not supported for multi-port Pods.
+- Upstreams will only be set on the first service's Envoy sidecar proxy for the pod.
+ - This means that ServiceIntentions from a multi-port pod to elsewhere, will need to use the first service's name,
+ `web` in the example above to accept connections from either `web` or `web-admin`. ServiceIntentions from elsewhere
+ to a multi-port pod can use the individual service names within the multi-port Pod.
+- Health checking is done on a per-Pod basis, so if any Kubernetes health checks (like readiness, liveness, etc) are
+ failing for any container on the Pod, the entire Pod is marked unhealthy, and any Consul service referencing that Pod
+ will also be marked as unhealthy. So, if `web` has a failing health check, `web-admin` would also be marked as
+ unhealthy for service mesh traffic.
\ No newline at end of file
diff --git a/website/content/docs/connect/lambda/function.mdx b/website/content/docs/connect/lambda/function.mdx
new file mode 100644
index 000000000000..1600a67d8952
--- /dev/null
+++ b/website/content/docs/connect/lambda/function.mdx
@@ -0,0 +1,82 @@
+---
+layout: docs
+page_title: Invoke AWS Lambda Functions
+description: >-
+ You can invoke an Amazon Web Services Lambda function in your Consul service mesh by configuring terminating gateways or sidecar proxies. Learn how to declare a registered function as an upstream and why we recommend using terminating gateways with Lambda.
+---
+
+# Invoke Lambda Functions from Mesh Services
+
+This topic describes how to invoke AWS Lambda functions from the Consul service mesh.
+
+## Overview
+
+You can invoke Lambda functions from the Consul service mesh through terminating gateways (recommended) or directly from service mesh proxies.
+
+### Terminating Gateway
+
+We recommend invoking Lambda functions through terminating gateways. This method supports cross-datacenter communication, transparent
+proxies, intentions, and all other Consul service mesh features.
+
+The terminating gateway must have [the appropriate IAM permissions](/consul/docs/lambda/registration#configure-iam-permissions-for-envoy)
+to invoke the function.
+
+The following diagram shows the invocation procedure:
+
+
+
+
+
+
+
+1. Make an HTTP request to the local service mesh proxy.
+1. The service mesh proxy forwards the request to the terminating gateway.
+1. The terminating gateway invokes the function.
+
+### Service Mesh Proxy
+
+You can invoke Lambda functions directly from a service's mesh sidecar proxy.
+This method has the following limitations:
+- Intentions are unsupported. Consul enforces intentions by validating the client certificates presented when a connection is received. Lambda does not support client certificate validation, which prevents Consul from supporting intentions using this method.
+- Transparent proxies are unsupported. This is because Lambda services are not
+ registered to a proxy.
+
+This method is secure because AWS IAM permissions are required to invoke Lambda functions. Additionally, all communication is encrypted with Amazon TLS when invoking Lambda resources.
+
+The Envoy sidecar proxy must have the correct AWS IAM credentials to invoke the function. You can define the credentials in environment variables, EC2 metadata, or ECS task metadata.
+
+The following diagram shows the invocation procedure:
+
+
+
+
+
+
+
+1. Make an HTTP request to the local service mesh proxy.
+2. The service mesh proxy invokes the Lambda.
+
+## Invoke a Lambda Function
+
+Before you can invoke a Lambda function, register the service used to invoke the Lambda function and the service running in Lambda with Consul (refer to [registration](/consul/docs/register/service/lambda) for instructions). The service used to invoke the function must be deployed to the service mesh.
+
+1. Update the invoking service to use the Lambda service as an upstream. In the following example, the `destination_name` for the invoking service (`api`) points to a Lambda service called `authentication`:
+
+ ```hcl
+ upstreams {
+ local_bind_port = 2345
+ destination_name = "authentication"
+ }
+ ```
+
+1. Issue the `consul services register` command to store the configuration:
+
+ ```shell-session
+ $ consul services register api-sidecar-proxy.hcl
+ ```
+
+1. Call the upstream service to invoke the Lambda function. In the following example, the `api` service invokes the `authentication` service at `localhost:2345`:
+
+ ```shell-session
+ $ curl https://localhost:2345
+ ```
diff --git a/website/content/docs/connect/lambda/index.mdx b/website/content/docs/connect/lambda/index.mdx
new file mode 100644
index 000000000000..06eb9ba0a93f
--- /dev/null
+++ b/website/content/docs/connect/lambda/index.mdx
@@ -0,0 +1,40 @@
+---
+layout: docs
+page_title: Connect Lambda services with Consul
+description: >-
+ Consul documentation provides reference material for all features and options available in Consul.
+---
+
+# Connect Lambda services with Consul
+
+You can configure Consul to allow services in your mesh to invoke Lambda functions, as well as allow Lambda functions to invoke services in your mesh. Lambda functions are programs or scripts that run in AWS Lambda. Refer to the [AWS Lambda website](https://aws.amazon.com/lambda/) for additional information.
+
+## Register Lambda functions into Consul
+
+The first step is to register your Lambda functions into Consul. We recommend using the [Lambda registrator module](https://github.com/hashicorp/terraform-aws-consul-lambda/tree/main/modules/lambda-registrator) to automatically synchronize Lambda functions into Consul. You can also manually register Lambda functions into Consul if you are unable to use the Lambda registrator.
+
+Refer to [Lambda Function Registration Requirements](/consul/docs/register/service/lambda) for additional information about registering Lambda functions into Consul.
+
+## Invoke Lambda functions from Consul service mesh
+
+After registering AWS Lambda functions, you can invoke Lambda functions from the Consul service mesh through terminating gateways (recommended) or directly from connected proxies.
+
+Refer to [Invoke Lambda Functions from Services](/consul/docs/connect/lambda/function) for details.
+
+## Invoke mesh services from Lambda function
+
+
+
+Functionality associated with beta features is subject to change. You should never use the beta release in secure environments or production scenarios. Features in beta may have performance issues, scaling issues, and limited support.
+
+
+
+You can also add the `consul-lambda-extension` plugin as a layer in your Lambda functions, which enables them to send requests to services in the mesh. The plugin starts a lightweight sidecar proxy that directs requests from Lambda functions to [mesh gateways](/consul/docs/connect/gateways#mesh-gateways). The gateways route traffic to the destination service to complete the request.
+
+
+
+Refer to [Invoke Services from Lambda Functions](/consul/docs/connect/lambda/service) for additional information about registering Lambda functions into Consul.
+
+Consul mesh gateways are required to send requests from Lambda functions to mesh services. Refer to [Mesh Gateways](/consul/docs/east-west/mesh-gateway/) for additional information.
+
+Note that L7 traffic management features are not supported. As a result, requests from Lambda functions ignore service routes and splitters.
diff --git a/website/content/docs/connect/lambda/service.mdx b/website/content/docs/connect/lambda/service.mdx
new file mode 100644
index 000000000000..7cd6b736609b
--- /dev/null
+++ b/website/content/docs/connect/lambda/service.mdx
@@ -0,0 +1,273 @@
+---
+layout: docs
+page_title: Invoke Services from Lambda Functions
+description: >-
+ This topic describes how to invoke services in the mesh from Lambda functions registered with Consul.
+---
+
+# Invoke Services from Lambda Functions
+
+This topic describes how to invoke services in the mesh from Lambda functions registered with Consul.
+
+~> **Lambda-to-mesh functionality is currently in beta**: Functionality associated with beta features is subject to change. You should never use the beta release in secure environments or production scenarios. Features in beta may have performance issues, scaling issues, and limited support.
+
+## Introduction
+
+The following steps describe the process:
+
+1. Deploy the destination service and mesh gateway.
+1. Deploy the Lambda extension layer.
+1. Deploy the Lambda registrator.
+1. Write the Lambda function code.
+1. Deploy the Lambda function.
+1. Invoke the Lambda function.
+
+You must add the `consul-lambda-extension` extension as a Lambda layer to enable Lambda functions to send requests to mesh services. Refer to the [AWS Lambda documentation](https://docs.aws.amazon.com/lambda/latest/dg/invocation-layers.html) for instructions on how to add layers to your Lambda functions.
+
+The layer runs an external Lambda extension that starts a sidecar proxy. The proxy listens on one port for each upstream service and upgrades the outgoing connections to mTLS. It then proxies the requests through to [mesh gateways](/consul/docs/connect/gateways#mesh-gateways).
+
+## Prerequisites
+
+You must deploy the destination services and mesh gateway prior to deploying your Lambda service with the `consul-lambda-extension` layer.
+
+### Deploy the destination service
+
+There are several methods for deploying services to Consul service mesh. The following example configuration deploys a service named `static-server` with Consul on Kubernetes.
+
+```yaml
+kind: Service
+apiVersion: v1
+metadata:
+ # Specifies the service name in Consul.
+ name: static-server
+spec:
+ selector:
+ app: static-server
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 8080
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: static-server
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: static-server
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: static-server
+ template:
+ metadata:
+ name: static-server
+ labels:
+ app: static-server
+ annotations:
+ 'consul.hashicorp.com/connect-inject': 'true'
+ spec:
+ containers:
+ - name: static-server
+ image: hashicorp/http-echo:latest
+ args:
+ - -text="hello world"
+ - -listen=:8080
+ ports:
+ - containerPort: 8080
+ name: http
+ serviceAccountName: static-server
+```
+
+### Deploy the mesh gateway
+
+The mesh gateway must be running and registered to the Lambda function’s Consul datacenter. Refer to the following documentation and tutorials for instructions:
+
+- [Mesh Gateways between WAN-Federated Datacenters](/consul/docs/east-west/mesh-gateway/federation)
+- [Mesh Gateways between Admin Partitions](/consul/docs/east-west/mesh-gateway/admin-partition)
+- [Establish cluster peering connections](/consul/docs/east-west/cluster-peering/establish/vm)
+- [Connect Services Across Datacenters with Mesh Gateways](/consul/tutorials/developer-mesh/service-mesh-gateways)
+
+## Deploy the Lambda extension layer
+
+The `consul-lambda-extension` extension runs during the `Init` phase of the Lambda function execution. The extension retrieves the data that the Lambda registrator has been configured to store from AWS Parameter Store and creates a lightweight TCP proxy. The proxy creates a local listener for each upstream defined in the `CONSUL_SERVICE_UPSTREAMS` environment variable.
+
+The extension periodically retrieves the data from the AWS Parameter Store so that the function can process requests. When the Lambda function receives a shutdown event, the extension also stops.
+
+1. Download the `consul-lambda-extension` extension from [releases.hashicorp.com](https://releases.hashicorp.com/):
+
+ ```shell-session
+ curl -o consul-lambda-extension_<VERSION>_linux_amd64.zip https://releases.hashicorp.com/consul-lambda/<VERSION>/consul-lambda-extension_<VERSION>_linux_amd64.zip
+ ```
+1. Create the AWS Lambda layer in the same AWS region as the Lambda function. You can create the layer manually using the AWS CLI or AWS Console, but we recommend using Terraform:
+
+
+
+ ```hcl
+ resource "aws_lambda_layer_version" "consul_lambda_extension" {
+ layer_name = "consul-lambda-extension"
+ filename = "consul-lambda-extension__linux_amd64.zip"
+ source_code_hash = filebase64sha256("consul-lambda-extension__linux_amd64.zip")
+ description = "Consul service mesh extension for AWS Lambda"
+ }
+ ```
+
+
+
+## Deploy the Lambda registrator
+
+Configure and deploy the Lambda registrator. Refer to the [registrator configuration documentation](/consul/docs/lambda/registration/automate#configuration) and the [registrator deployment documentation](/consul/docs/lambda/registration/automate#deploy-the-lambda-registrator) for instructions.
+
+## Write the Lambda function code
+
+Refer to the [AWS Lambda documentation](https://docs.aws.amazon.com/lambda/latest/dg/getting-started.html) for instructions on how to write a Lambda function. In the following example, the function calls an upstream service on port `2345`:
+
+
+```go
+package main
+
+import (
+ "context"
+ "io"
+ "fmt"
+ "net/http"
+ "github.com/aws/aws-lambda-go/lambda"
+)
+
+type Response struct {
+ StatusCode int `json:"statusCode"`
+ Body string `json:"body"`
+}
+
+func HandleRequest(ctx context.Context, _ interface{}) (Response, error) {
+ resp, err := http.Get("http://localhost:2345")
+ fmt.Println("Got response", resp)
+ if err != nil {
+ return Response{StatusCode: 500, Body: "Something bad happened"}, err
+ }
+
+ if resp.StatusCode != 200 {
+ return Response{StatusCode: resp.StatusCode, Body: resp.Status}, err
+ }
+
+ defer resp.Body.Close()
+
+ b, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return Response{StatusCode: 500, Body: "Error decoding body"}, err
+ }
+
+ return Response{StatusCode: 200, Body: string(b)}, nil
+}
+
+func main() {
+ lambda.Start(HandleRequest)
+}
+```
+
+## Deploy the Lambda function
+
+1. Create and apply an IAM policy that allows the Lambda function’s role to fetch the Lambda extension’s data from the AWS Parameter Store. The following example creates an IAM role for the Lambda function, creates an IAM policy with the necessary permissions, and attaches the policy to the role:
+
+
+
+ ```hcl
+ resource "aws_iam_role" "lambda" {
+ name = "lambda-role"
+
+ assume_role_policy = <<EOF
+ {
+   "Version": "2012-10-17",
+   "Statement": [
+     {
+       "Action": "sts:AssumeRole",
+       "Principal": {
+         "Service": "lambda.amazonaws.com"
+       },
+       "Effect": "Allow"
+     }
+   ]
+ }
+ EOF
+ }
+
+ resource "aws_iam_role_policy" "lambda" {
+ name = "lambda-policy"
+ role = aws_iam_role.lambda.id
+
+ policy = <<EOF
+ {
+   "Version": "2012-10-17",
+   "Statement": [
+     {
+       "Effect": "Allow",
+       "Action": "ssm:GetParametersByPath",
+       "Resource": "arn:aws:ssm:*:*:parameter/lambda_extension_data*"
+     }
+   ]
+ }
+ EOF
+ }
+ ```
+
+1. Configure and deploy the Lambda function. Refer to the [Lambda extension configuration](#lambda-extension-configuration) reference for information about all available options. There are several methods for deploying Lambda functions. The following example uses Terraform to deploy a function that can invoke the `static-server` upstream service using mTLS data stored under the `/lambda_extension_data` prefix:
+
+
+
+ ```hcl
+ resource "aws_lambda_function" "example" {
+ …
+ function_name = "lambda"
+ role = aws_iam_role.lambda.arn
+ tags = {
+ "serverless.consul.hashicorp.com/v1alpha1/lambda/enabled" = "true"
+ }
+ environment {
+ variables = {
+ CONSUL_MESH_GATEWAY_URI = var.mesh_gateway_http_addr
+ CONSUL_SERVICE_UPSTREAMS = "static-server:2345:dc1"
+ CONSUL_EXTENSION_DATA_PREFIX = "/lambda_extension_data"
+ }
+ }
+ layers = [aws_lambda_layer_version.consul_lambda_extension.arn]
+ }
+ ```
+
+
+
+1. Run the `terraform apply` command and Consul automatically configures a service for the Lambda function.
+
+### Lambda extension configuration
+
+Define the following environment variables in your Lambda functions to configure the Lambda extension. The variables apply to each Lambda function in your environment:
+
+| Variable | Description | Default |
+| --- | --- | --- |
+| `CONSUL_MESH_GATEWAY_URI` | Specifies the URI where the mesh gateways to which the plugin makes requests are running. The mesh gateway should be registered in the same Consul datacenter and partition that the service is running in. For optimal performance, this mesh gateway should run in the same AWS region. | none |
+| `CONSUL_EXTENSION_DATA_PREFIX` | Specifies the prefix that the plugin pulls configuration data from. The data must be located in the following directory: `"${CONSUL_EXTENSION_DATA_PREFIX}/${CONSUL_SERVICE_PARTITION}/${CONSUL_SERVICE_NAMESPACE}/"` | none |
+| `CONSUL_SERVICE_NAMESPACE` | Specifies the Consul namespace the service is registered into. | `default` |
+| `CONSUL_SERVICE_PARTITION` | Specifies the Consul partition the service is registered into. | `default` |
+| `CONSUL_REFRESH_FREQUENCY` | Specifies the amount of time the extension waits before re-pulling data from the Parameter Store. Use [Go `time.Duration`](https://pkg.go.dev/time@go1.19.1#ParseDuration) string values, for example, `"30s"`. The time is added to the duration configured in the Lambda registrator `sync_frequency_in_minutes` configuration. Refer to [Lambda registrator configuration options](/consul/docs/lambda/registration/automate#lambda-registrator-configuration-options). The combined configurations determine how stale the data may become. Lambda functions can run for up to 14 hours, so we recommend configuring a value that results in acceptable staleness for certificates. | `"5m"` |
+| `CONSUL_SERVICE_UPSTREAMS` | Specifies a comma-separated list of upstream services that the Lambda function can call. Specify the value as an unlabelled annotation according to the [`consul.hashicorp.com/connect-service-upstreams` annotation format](/consul/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) in Consul on Kubernetes. For example, `"[service-name]:[port]:[optional-datacenter]"` | none |
+
+## Invoke the Lambda function
+
+If _intentions_ are enabled in the Consul service mesh, you must create an intention that allows the Lambda function's Consul service to invoke all upstream services prior to invoking the Lambda function. Refer to [Service mesh intentions](/consul/docs/secure-mesh/intention) for additional information.
+
+There are several ways to invoke Lambda functions. In the following example, the `aws lambda invoke` CLI command invokes the function:
+
+```shell-session
+$ aws lambda invoke --function-name lambda /dev/stdout | cat
+```
diff --git a/website/content/docs/connect/manage-traffic/failover/index.mdx b/website/content/docs/connect/manage-traffic/failover/index.mdx
deleted file mode 100644
index 52030e40689d..000000000000
--- a/website/content/docs/connect/manage-traffic/failover/index.mdx
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: docs
-page_title: Failover configuration overview
-description: Learn about failover strategies and service mesh features you can implement to route traffic if services become unhealthy or unreachable, including sameness groups, prepared queries, and service resolvers.
----
-
-# Failover overview
-
-Services in your mesh may become unhealthy or unreachable for many reasons, but you can mitigate some of the effects associated with infrastructure issues by configuring Consul to automatically route traffic to and from failover service instances. This topic provides an overview of the failover strategies you can implement with Consul.
-
-## Service failover strategies in Consul
-
-There are several methods for implementing failover strategies between datacenters in Consul. You can adopt one of the following strategies based on your deployment configuration and network requirements:
-
-- Configure the `Failover` stanza in a service resolver configuration entry to explicitly define which services should failover and the targeting logic they should follow.
-- Make a prepared query for each service that you can use to automate geo-failover.
-- Create a sameness group to identify partitions with identical namespaces and service names to establish default failover targets.
-
-The following table compares these strategies in deployments with multiple datacenters to help you determine the best approach for your service:
-
-| | `Failover` stanza | Prepared query | Sameness groups |
-| --- | :---: | :---: | :---: |
-| **Supports WAN federation** | ✅ | ✅ | ❌ |
-| **Supports cluster peering** | ✅ | ❌ | ✅ |
-| **Supports locality-aware routing** | ✅ | ❌ | ✅ |
-| **Multi-datacenter failover strength** | ✅ | ❌ | ✅ |
-| **Multi-datacenter usage scenario** | Enables more granular logic for failover targeting. | Central policies that can automatically target the nearest datacenter. | Group size changes without edits to existing member configurations. |
-| **Multi-datacenter usage scenario** | Configuring failover for a single service or service subset, especially for testing or debugging purposes | WAN-federated deployments where a primary datacenter is configured. Prepared queries are not replicated over peer connections. | Cluster peering deployments with consistently named services and namespaces. |
-
-Although cluster peering connections support the [`Failover` field of the prepared query request schema](/consul/api-docs/query#failover) when using Consul's service discovery features to [perform dynamic DNS queries](/consul/docs/services/discovery/dns-dynamic-lookups), they do not support prepared queries for service mesh failover scenarios.
-
-### Failover configurations for a service mesh with a single datacenter
-
-You can implement a service resolver configuration entry and specify a pool of failover service instances that other services can exchange messages with when the primary service becomes unhealthy or unreachable. We recommend adopting this strategy as a minimum baseline when implementing Consul service mesh and layering additional failover strategies to build resilience into your application network.
-
-Refer to the [`Failover` configuration ](/consul/docs/connect/config-entries/service-resolver#failover) for examples of how to configure failover services in the service resolver configuration entry on both VMs and Kubernetes deployments.
-
-### Failover configuration for WAN-federated datacenters
-
-If your network has multiple Consul datacenters that are WAN-federated, you can configure your applications to look for failover services with prepared queries. [Prepared queries](/consul/api-docs/) are configurations that enable you to define complex service discovery lookups. This strategy hinges on the secondary datacenter containing service instances that have the same name and residing in the same namespace as their counterparts in the primary datacenter.
-
-Refer to the [Automate geo-failover with prepared queries tutorial](/consul/tutorials/developer-discovery/automate-geo-failover) for additional information.
-
-### Failover configuration for peered clusters and partitions
-
-In networks with multiple datacenters or partitions that share a peer connection, each datacenter or partition functions as an independent unit. As a result, Consul does not correlate services that have the same name, even if they are in the same namespace.
-
-You can configure sameness groups for this type of network. Sameness groups allow you to define a group of admin partitions where identical services are deployed in identical namespaces. After you configure the sameness group, you can reference the `SamenessGroup` parameter in service resolver, exported service, and service intention configuration entries, enabling you to add or remove cluster peers from the group without making changes to every cluster peer every time.
-
-You can configure a sameness group so that it functions as the default for failover behavior. You can also reference sameness groups in a service resolver's `Failover` stanza or in a prepared query. Refer to [Failover with sameness groups](/consul/docs/connect/manage-traffic/failover/sameness) for more information.
-
-## Locality-aware routing
-
-By default, Consul balances traffic to all healthy upstream instances in the cluster, even if the instances are in different network regions and zones. You can configure Consul to route requests to upstreams in the same region and zone, which reduces latency and transfer costs. Refer to [Route traffic to local upstreams](/consul/docs/connect/manage-traffic/route-to-local-upstreams) for additional information.
\ No newline at end of file
diff --git a/website/content/docs/connect/manage-traffic/failover/sameness.mdx b/website/content/docs/connect/manage-traffic/failover/sameness.mdx
deleted file mode 100644
index ac8c8745fecc..000000000000
--- a/website/content/docs/connect/manage-traffic/failover/sameness.mdx
+++ /dev/null
@@ -1,203 +0,0 @@
----
-layout: docs
-page_title: Failover with sameness groups
-description: You can configure sameness groups so that when a service instance fails, traffic automatically routes to an identical service instance. Learn how to use sameness groups to create a failover strategy for deployments with multiple datacenters and cluster peering connections.
----
-
-# Failover with sameness groups
-
-This page describes how to use sameness groups to automatically redirect service traffic to healthy instances in failover scenarios. Sameness groups are a user-defined set of Consul admin partitions with identical registered services. These admin partitions typically belong to Consul datacenters in different cloud regions, which enables sameness groups to participate in several service failover configuration strategies.
-
-To create a sameness group and configure each Consul datacenter to allow traffic from other members of the group, refer to [create sameness groups](/consul/docs/connect/cluster-peering/usage/create-sameness-groups).
-
-## Failover strategies
-
-You can edit a sameness group configuration entry so that all services failover to healthy instances on other members of a sameness group by default. You can also reference the sameness group in other configuration entries to enact other failover strategies for your network.
-
-You can establish a failover strategy by configuring sameness group behavior in the following locations:
-
-- Sameness group configuration entry
-- Service resolver configuration entry
-- Prepared queries
-
-You can also configure service instances to route to upstreams in the same availability region during a failover. Refer to [Route traffic to local upstreams](/consul/docs/connect/manage-traffic/route-to-local-upstreams) for additional information.
-
-### Failover with a sameness group configuration entry
-
-To define failover behavior using a sameness group configuration entry, set `DefaultForFailover=true` and then apply the updated configuration to all clusters that are members of the group.
-
-In the following example configuration entry, datacenter `dc1` has two partitions, `partition-1` and `partition-2`. A second datacenter, `dc2`, has a single partition named `partition-1`. All three partitions have identically configured services and established cluster peering connections. The configuration entry defines a sameness group, `example-sg` in `dc1`. When redirecting traffic during a failover scenario, Consul attempts to find a healthy instance in a specific order: `dc1-partition-1`, then `dc1-partition-2`, then `dc2-partition-1`.
-
-
-
-
-
-```hcl
-Kind = "sameness-group"
-Name = "example-sg"
-Partition = "partition-1"
-DefaultForFailover = true
-Members = [
- {Partition = "partition-1"},
- {Partition = "partition-2"},
- {Peer = "dc2-partition-1"}
- ]
-```
-
-
-
-
-
-```
-{
- "Kind": "sameness-group",
- "Name": "example-sg",
- "Partition": "partition-1",
- "DefaultForFailover": true,
- "Members": [
- {
- "Partition": "partition-1"
- },
- {
- "Partition": "partition-2"
- },
- {
- "Peer": "dc2-partition-1"
- }
- ]
-}
-```
-
-
-
-
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: SamenessGroup
-metadata:
- name: example-sg
-spec:
- defaultForFailover: true
- members:
- - partition: partition-1
- - partition: partition-2
- - peer: dc2-partition-1
-```
-
-
-
-
-When a sameness group is configured as the failover default, sameness group failover takes place when a service resolver configuration entry does not implement more specific failover behavior. When a service resolver is defined for an upstream, it is used instead of the sameness group for default failover behavior.
-
-All services registered in the admin partition must failover to another member of the sameness group. You cannot choose subsets of services to use the sameness group as the failover default. If groups do not have identical services, or if a service is registered to some group members but not all members, this failover strategy may produce errors.
-
-For more information about specifying sameness group members and failover, refer to [sameness group configuration entry reference](/consul/docs/connect/config-entries/sameness-group).
-
-### Failover with a service resolver configuration entry
-
-When the sameness group is not configured as the failover default, you can reference the sameness group in a service resolver configuration entry. This approach enables you to use the sameness group as the failover destination for some services registered to group members.
-
-In the following example configuration, a database service called `db` is filtered into subsets based on a user-defined `version` tag. Services with a `v1` tag belong to the default subset, which uses the `product-group` sameness group for its failover. Instances of `db` with the `v2` tag, meanwhile, fail over to a service named `canary-db`.
-
-
-
-
-
-```hcl
-Kind = "service-resolver"
-Name = "db"
-DefaultSubset = "v1"
-Subsets = {
- v1 = {
- Filter = "Service.Meta.version == v1"
- }
- v2 = {
- Filter = "Service.Meta.version == v2"
- }
-}
-Failover {
- v1 = {
- SamenessGroup = "product-group"
- }
- v2 = {
- Service = "canary-db"
- }
-}
-```
-
-
-
-
-
-```
-{
- "Kind": "service-resolver",
- "Name": "db",
- "DefaultSubset": "v1",
- "Subsets": {
- "v1": {
- "Filter": "Service.Meta.version == v1"
- },
- "v2": {
- "Filter": "Service.Meta.version == v2"
- }
- },
- "Failover": {
- "v1": {
- "SamenessGroup": "product-group"
- },
- "v2": {
- "Service": "canary-db"
- }
- }
-}
-```
-
-
-
-
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: ServiceResolver
-metadata:
- name: db
-spec:
- defaultSubset: v1
- subsets:
- v1:
- filter: 'Service.Meta.version == v1'
- v2:
- filter: 'Service.Meta.version == v2'
- failover:
- v1:
- samenessGroup: "product-group"
- v2:
- service: "canary-db"
-```
-
-
-
-
-For more information, including additional examples, refer to [service resolver configuration entry reference](/consul/docs/connect/config-entries/service-resolver).
-
-### Failover with a prepared query
-
-You can specify a sameness group in a prepared query to return service instances from the first member that has healthy instances. When a member does not have healthy instances, Consul queries group members in the order defined in the list of members in the sameness group configuration entry.
-
-The following example demonstrates a prepared query that can be referenced with the name `query-1`. It queries members of the sameness group for healthy instances of `db` that are registered to the `store-ns` namespace on partitions named `partition-1`.
-
-```json
-{
- "Name": "query-1",
- "Service": {
- "Service": "db",
- "SamenessGroup": "product-group",
- "Partition": "partition-1",
- "Namespace": "store-ns"
- }
-}
-```
-
-In prepared queries, the sameness group is mutually exclusive with the [`Failover`](/consul/api-docs/query#failover) field because the sameness group includes failover targets based on the sameness group’s members. For more information about using prepared queries, refer to [Enable dynamic DNS queries](/consul/docs/services/discovery/dns-dynamic-lookups).
diff --git a/website/content/docs/connect/manage-traffic/index.mdx b/website/content/docs/connect/manage-traffic/index.mdx
deleted file mode 100644
index 29ff68d9cef2..000000000000
--- a/website/content/docs/connect/manage-traffic/index.mdx
+++ /dev/null
@@ -1,84 +0,0 @@
----
-layout: docs
-page_title: Service mesh traffic management overview
-description: >-
- Consul can route, split, and resolve Layer 7 traffic in a service mesh to support workflows like canary testing and blue/green deployments. Learn about the three configuration entry kinds that define L7 traffic management behavior in Consul.
----
-
-# Service mesh traffic management overview
-
-This topic provides overview information about the application layer traffic management capabilities available in Consul service mesh. These capabilities are also referred to as *Layer 7* or *L7 traffic management*.
-
-## Introduction
-
-Consul service mesh allows you to divide application layer traffic between different subsets of service instances. You can leverage L7 traffic management capabilities to perform complex processes, such as configuring backup services for failover scenarios, canary and A-B testing, blue-green deployments, and soft multi-tenancy in which production, QA, and staging environments share compute resources. L7 traffic management with Consul service mesh allows you to designate groups of service instances in the Consul catalog smaller than all instances of single service and configure when that subset should receive traffic.
-
-You cannot manage L7 traffic with the [built-in proxy](/consul/docs/connect/proxies/built-in),
-[native proxies](/consul/docs/connect/native), or some [Envoy proxy escape hatches](/consul/docs/connect/proxies/envoy#escape-hatch-overrides).
-
-## Discovery chain
-
-Consul uses a series of stages to discover service mesh proxy upstreams. Each stage represents different ways of managing L7 traffic. They are referred to as the _discovery chain_:
-
-- routing
-- splitting
-- resolution
-
-For information about integrating service mesh proxy upstream discovery using the discovery chain, refer to [Discovery Chain for Service Mesh Traffic Management](/consul/docs/connect/manage-traffic/discovery-chain).
-
-The Consul UI shows discovery chain stages in the **Routing** tab of the **Services** page:
-
-
-
-You can define how Consul manages each stage of the discovery chain in a Consul _configuration entry_. [Configuration entries](/consul/docs/connect/config-entries) modify the default behavior of the Consul service mesh.
-
-When managing L7 traffic with cluster peering, there are additional configuration requirements to resolve peers in the discovery chain. Refer to [Cluster peering L7 traffic management](/consul/docs/connect/cluster-peering/usage/peering-traffic-management) for more information.
-
-### Routing
-
-The first stage of the discovery chain is the service router. Routers intercept traffic according to a set of L7 attributes, such as path prefixes and HTTP headers, and route the traffic to a different service or service subset.
-
-Apply a [service router configuration entry](/consul/docs/connect/config-entries/service-router) to implement a router. Service router configuration entries can only reference service splitter or service resolver configuration entries.
-
-
-
-### Splitting
-
-The second stage of the discovery chain is the service splitter. Service splitters split incoming requests and route them to different services or service subsets. Splitters enable staged canary rollouts, versioned releases, and similar use cases.
-
-Apply a [service splitter configuration entry](/consul/docs/connect/config-entries/service-splitter) to implement a splitter. Service splitters configuration entries can only reference other service splitters or service resolver configuration entries.
-
-
-
-If multiple service splitters are chained, Consul flattens the splits so that they behave as a single service spitter. In the following equation, `splitter[B]` references `splitter[A]`:
-
-```text
-splitter[A]: A_v1=50%, A_v2=50%
-splitter[B]: A=50%, B=50%
----------------------
-splitter[effective_B]: A_v1=25%, A_v2=25%, B=50%
-```
-
-
-### Resolution
-
-The third stage of the discovery chain is the service resolver. Service resolvers specify which instances of a service satisfy discovery requests for the provided service name. Service resolvers enable several use cases, including:
-
-- Designate failovers when service instances become unhealthy or unreachable.
-- Configure service subsets based on DNS values.
-- Route traffic to the latest version of a service.
-- Route traffic to specific Consul datacenters.
-- Create virtual services that route traffic to instances of the actual service in specific Consul datacenters.
-
-Apply a [service resolver configuration entry](/consul/docs/connect/config-entries/service-resolver) to implement a resolver. Service resolver configuration entries can only reference other service resolvers.
-
-
-
-
-If no resolver is configured for a service, Consul sends all traffic to healthy instances of the service that have the same name in the current datacenter or specified namespace and ends the discovery chain.
-
-Service resolver configuration entries can also process network layer, also called level 4 (L4), traffic. As a result, you can implement service resolvers for services that communicate over `tcp` and other non-HTTP protocols.
-
-## Locality-aware routing
-
-By default, Consul balances traffic to all healthy upstream instances in the cluster, even if the instances are in different network regions and zones. You can configure Consul to route requests to upstreams in the same region and zone, which reduces latency and transfer costs. Refer to [Route traffic to local upstreams](/consul/docs/connect/manage-traffic/route-to-local-upstreams) for additional information.
diff --git a/website/content/docs/connect/manage-traffic/limit-request-rates.mdx b/website/content/docs/connect/manage-traffic/limit-request-rates.mdx
deleted file mode 100644
index adf2af5b706d..000000000000
--- a/website/content/docs/connect/manage-traffic/limit-request-rates.mdx
+++ /dev/null
@@ -1,144 +0,0 @@
----
-layout: docs
-page_title: Limit request rates to services in the mesh
-description: Learn how to limit the rate of requests to services in a Consul service mesh. Rate limits on requests improves network resilience and availability.
----
-
-# Limit request rates to services in the mesh
-
-This topic describes how to configure Consul to limit the request rate to services in the mesh.
-
- This feature is available in Consul Enterprise.
-
-## Introduction
-
-Consul allows you to configure settings to limit the rate of HTTP requests a service receives from sources in the mesh. Limiting request rates is one strategy for building a resilient and highly-available network.
-
-Consul applies rate limits per service instance. As an example, if you specify a rate limit of 100 requests per second (RPS) for a service and five instances of the service are available, the service accepts a total of 500 RPS, which equals 100 RPS per instance.
-
-You can limit request rates for all traffic to a service, as well as set rate limits for specific URL paths on a service. When multiple rate limits are configured on a service, Consul applies the limit configured for the first matching path. As a result, the maximum RPS for a service is equal to the number of service instances deployed for a service multiplied by either the rate limit configured for that service or the rate limit for the path.
-
-## Requirements
-
-Consul Enterprise v1.17.0 or later
-
-## Limit request rates to a service on all paths
-
-Specify request rate limits in the service defaults configuration entry. Create or edit the existing service defaults configuration entry for your service and specify the following fields:
-
-
-
-
-1. `RateLimits.InstanceLevel.RequestPerSecond`: Set an average number of requests per second that Consul should allow to the service. The number of requests may momentarily exceed this value up to the value specified in the `RequestsMaxBurst` parameter, but Consul temporarily lowers the speed of the transactions.
-1. `RateLimits.InstanceLevel.RequestsMaxBurst`: Set the maximum number of concurrent requests that Consul momentarily allows to the service. Consul blocks any additional requests over this limit.
-
-The following example configures the default behavior for a service named `billing`. This configuration limits each instance of the billing service to an average of 1000 requests per second, but allows the service to accept up to 1500 concurrent requests.
-
-```hcl
-Kind = "service-defaults"
-Name = "billing"
-Protocol = "http"
-
-RateLimit {
- InstanceLevel {
- RequestsPerSecond = 1000
- RequestsMaxBurst = 1500
- }
-}
-```
-
-
-
-
-1. `spec.rateLimits.instanceLevel.requestPerSecond`: Set an average number of requests per second that Consul should allow to the service. The number of requests may momentarily exceed this value up to the value specified in the `requestsMaxBurst` parameter, but Consul temporarily lowers the speed of the transactions.
-1. `spec.rateLimits.instanceLevel.requestsMaxBurst`: Set the maximum number of concurrent requests that Consul momentarily allows to the service. Consul blocks any additional requests over this limit.
-
-The following example configures the default behavior for a service named `billing`. This configuration limits each instance of the billing service to an average of 1000 requests per second, but allows the service to accept up to 1500 concurrent requests.
-
-```yaml
-kind: ServiceDefaults
-name: billing
-protocol: http
-rateLimit:
- instanceLevel:
- requestsPerSecond: 1000
- requestsMaxBurst: 1500
-```
-
-
-
-
-Refer to the [service defaults configuration entry reference](/consul/docs/connect/config-entries/service-defaults) for additional specifications and example configurations.
-
-## Specify request rate limits for specific paths
-
-Specify request rate limits in the service defaults configuration entry. Create or edit the existing service defaults configuration entry for your service and configure the following parameters:
-
-
-
-
-1. Add a `RateLimits.InstanceLevel.Routes` block to the configuration entry. The block contains the limits and matching criteria for determining which paths to set limits on.
-1. In the `Routes` block, configure one of the following match criteria to determine which path to set the limits on:
- - `PathExact`: Specifies the exact path to match on the request path.
- - `PathPrefix`: Specifies the path prefix to match on the request path.
- - `PathRegex`: Specifies a regular expression to match on the request path.
-1. Configure the limits you want to enforce in the `Routes` block as well. You can configure the following parameters:
- - `RequestsPerSecond`: Set an average number of requests per second that Consul should allow to the service through the matching path. The number of requests may momentarily exceed this value up to the value specified in the `RequestsMaxBurst` parameter, but Consul temporarily lowers the speed of the transactions. This configuration overrides the value specified in `RateLimits.InstanceLevel.RequestPerSecond` field of the configuration entry.
- - `RequestsMaxBurst`: Set the maximum number of concurrent requests that Consul momentarily allows to the service through the matching path. Consul blocks any additional requests over this limit. This configuration overrides the value specified in `RateLimits.InstanceLevel.RequestsMaxBurst` field of the configuration entry.
-
-The following example configures the default behavior for a service named `billing`. This configuration limits each instance of the billing service depending on the path it received the request on. The service is limited to an average of 500 requests when the request is made on an HTTP path with the `/api` prefix. When an instance of the billing service receives a request from the `/login` path, it is limited to an average of 100 requests per second and 500 concurrent connections.
-
-```hcl
-Kind = "service-defaults"
-Name = "billing"
-Protocol = "http"
-
-RateLimit {
- InstanceLevel {
- Routes = [
- {
- PathPrefix = "/api"
- RequestsPerSecond = 500
- },
- {
- PathPrefix = "/login"
- RequestsPerSecond = 100
- RequestsMaxBurst = 500
- }
- ]
- }
-}
-```
-
-
-
-
-1. Add a `spec.rateLimits.instanceLevel.routes` block to the configuration entry. The block contains the limits and matching criteria for determining which paths to set limits on.
-1. In the `routes` block, configure one of the following match criteria for enabling Consul to determine which path to set the limits on:
- - `pathExact`: Specifies the exact path to match on the request path. When using this field.
- - `pathPrefix`: Specifies the path prefix to match on the request path.
- - `pathRegex`: Specifies a regular expression to match on the request path.
-1. Configure the limits you want to enforce in the `routes` block as well. You can configure the following parameters:
- - `requestsPerSecond`: Set an average number of requests per second that Consul should allow to the service through the matching path. The number of requests may momentarily exceed this value up to the value specified in the `requestsMaxBurst` parameter, but Consul temporarily lowers the speed of the transactions. This configuration overrides the value specified in `spec.rateLimits.instanceLevel.requestPerSecond` field of the CRD.
- - `requestsMaxBurst`: Set the maximum number of concurrent requests that Consul momentarily allows to the service through the matching path. Consul blocks any additional requests over this limit. This configuration overrides the value specified in `spec.rateLimits.instanceLevel.requestsMaxBurst` field of the CRD.
-
-The following example configures the default behavior for a service named `billing`. This configuration limits each instance of the billing service depending on the path it received the request on. The service is limited to an average of 500 requests when the request is made on an HTTP path with the `/api` prefix. When an instance of the billing service receives a request from the `/login` path, it is limited to an average of 100 requests per second and 500 concurrent connections.
-
-```yaml
-kind: service-defaults
-name: billing
-protocol: http
-rateLimit:
- instanceLevel:
- routes:
- - pathPrefix: /api
- requestsPerSecond: 500
- - pathPrefix: /login
- requestsPerSecond: 100
- requestsMaxBurst: 500
-```
-
-
-
-
-Refer to the [service defaults configuration entry reference](/consul/docs/connect/config-entries/service-defaults) for additional specifications and example configurations.
\ No newline at end of file
diff --git a/website/content/docs/connect/manage-traffic/route-to-local-upstreams.mdx b/website/content/docs/connect/manage-traffic/route-to-local-upstreams.mdx
deleted file mode 100644
index 4d5be2e5b55d..000000000000
--- a/website/content/docs/connect/manage-traffic/route-to-local-upstreams.mdx
+++ /dev/null
@@ -1,361 +0,0 @@
----
-layout: docs
-page_title: Route traffic to local upstreams
-description: Learn how to enable locality-aware routing in Consul so that proxies can send traffic to upstreams in the same region and zone as the downstream service. Routing traffic based on locality can reduce latency and cost.
----
-
-# Route traffic to local upstreams
-
-This topic describes how to enable locality-aware routing so that Consul can prioritize sending traffic to upstream services that are in the same region and zone as the downstream service.
-
- This feature is available in Consul Enterprise.
-
-## Introduction
-
-By default, Consul balances traffic to all healthy upstream instances in the cluster, even if the instances are in different network zones. You can specify the cloud service provider (CSP) locality for Consul server agents and services registered to the service mesh, which enables several benefits:
-
-- Consul prioritizes the nearest upstream instances when routing traffic through the mesh.
-- When upstream service instances becomes unhealthy, Consul prioritizes failing over to instances that are in the same region as the downstream service. Refer to [Failover](/consul/docs/connect/traffic-management/failover) for additional information about failover strategies in Consul.
-
-When properly implemented, routing traffic to local upstreams can reduce latency and transfer costs associated with sending requests to other regions.
-
-
-### Workflow
-
-For networks deployed to virtual machines, complete the following steps to route traffic to local upstream services:
-
-1. Specify the region and zone for your Consul client agents. This allows services to inherit the region and zone configured for the Consul agent that the services are registered with.
-1. Specify the localities of your service instances. This step is optional and is only necessary when defining a custom network topology or when your deployed environment requires explicitly set localities for certain service's instances.
-1. Configure service mesh proxies to route traffic locally within the partition.
-
-#### Container orchestration platforms
-
-If you deployed Consul to a Kubernetes or ECS environment using `consul-k8s` or `consul-ecs`, service instance locality information is inherited from the host machine. As a result, you do not need to specify the regions and zones on containerized platforms unless you are implementing a custom deployment.
-
-On Kubernetes, Consul automatically populates geographic information about service instances using the `topology.kubernetes.io/region` and `topology.kubernetes.io/zone` labels from the Kubernetes nodes. On AWS ECS, Consul uses the `AWS_REGION` environment variable and `AvailabilityZone` attribute of the ECS task meta.
-
-### Requirements
-
-You should only enable locality-aware routing when each set of external upstream instances within the same zone and region have enough capacity to handle requests from downstream service instances in their respective zones. Locality-aware routing is an advanced feature that may adversely impact service capacity if used incorrectly. When enabled, Consul routes all traffic to the nearest set of service instances and only fails over when no healthy instances are available in the nearest set.
-
-## Specify the locality of your Consul agents
-
-The `locality` configuration on a Consul client applies to all services registered to the client.
-
-1. Configure the `locality` block in your Consul client agent configuration files. The `locality` block is a map containing the `region` and `zone` parameters.
-
- The parameters should match the values for regions and zones defined in your network. Refer to [`locality`](/consul/docs/agent/config/config-files#locality) in the agent configuration reference for additional information.
-
-1. Start or restart the agent to apply the configuration. Refer to [Starting a Consul agent](/consul/docs/agent#starting-the-consul-agent) for instructions.
-
-In the following example, the agent is running in the `us-west-1` region and `us-west-1a` zone on AWS:
-
-```hcl
-locality = {
- region = "us-west-1"
- zone = "us-west-1a"
-}
-```
-
-## Specify the localities of your service instances (optional)
-
-This step is optional in most scenarios. Refer to [Workflow](#workflow) for additional information.
-
-1. Configure the `locality` block in your service definition for both downstream (client) and upstream services. The `locality` block is a map containing the `region` and `zone` parameters. When you start a proxy for the service, Consul passes the locality to the proxy so that it can route traffic accordingly.
-
- The parameters should match the values for regions and zones defined in your network. Refer to [`locality`](/consul/docs/services/configuration/services-configuration-reference#locality) in the services configuration reference for additional information.
-
-1. Verify that your service is also configured with a proxy. Refer to [Define service mesh proxy](/consul/docs/connect/proxies/deploy-sidecar-services#define-service-mesh-proxy) for additional information.
-Register or re-register the service to apply the configuration. Refer to [Register services and health checks](/consul/docs/services/usage/register-services-checks) for instructions.
-
-In the following example, the `web` service is available in the `us-west-1` region and `us-west-1a` zone on AWS:
-
-```hcl
-service {
- id = "web"
- locality = {
- region = "us-west-1"
- zone = "us-west-1a"
- }
- connect = { sidecar_service = {} }
-}
-```
-
-If registering services manually via the `/agent/service/register` API endpoint, you can specify the `locality` configuration in the payload. Refer to [Register Service](/consul/api-docs/agent/service#register-service) in the API documentation for additional information.
-
-## Enable service mesh proxies to route traffic locally
-
-You can configure the default routing behavior for all proxies in the mesh as well as configure the routing behavior for specific services.
-
-### Configure default routing behavior
-
-Configure the `PrioritizeByLocality` block in the proxy defaults configuration entry and specify the `failover` mode. This configuration enables proxies in the mesh to use the region and zone defined in the service configuration to route traffic. Refer to [`PrioritizeByLocality`](/consul/docs/connect/config-entries/proxy-defaults#prioritizebylocality) in the proxy defaults reference for details about the configuration.
-
-
-
-
-
-```hcl
-Kind = "proxy-defaults"
-Name = "global"
-PrioritizeByLocality = {
- Mode = "failover"
-}
-```
-
-
-
-
-
-
-```json
-{
- "kind": "proxy-defaults",
- "name": "global",
- "prioritizeByLocality": {
- "mode": "failover"
- }
-}
-```
-
-
-
-
-
-```yaml
-apiversion: consul.hashicorp.com/v1alpha1
-kind: ProxyDefaults
-metadata:
- name: global
-spec:
- prioritizeByLocality:
- mode: failover
-```
-
-
-
-
-
-Apply the configuration by either running the [`consul config write` CLI command](/consul/commands/config/write), applying the Kubernetes CRD, or calling the [`/config` HTTP API endpoint](/consul/api-docs/config).
-
-
-
-
- ```shell-session
- $ consul config write proxy-defaults.hcl
- ```
-
-
-
-
-
- ```shell-session
- $ kubectl apply -f proxy-defaults.yaml
- ```
-
-
-
-
- ```shell-session
- $ curl --request PUT --data @proxy-defaults.hcl http://127.0.0.1:8500/v1/config
- ```
-
-
-
-
-### Configure routing behavior for individual service
-
-1. Create a service resolver configuration entry and specify the following fields:
- - `Name`: The name of the target upstream service for which downstream clients should use locality-aware routing.
- - `PrioritizeByLocality`: This block enables proxies in the mesh to use the region and zone defined in the service configuration to route traffic. Set the `mode` inside the block to `failover`. Refer to [`PrioritizeByLocality`](/consul/docs/connect/config-entries/service-resolver#prioritizebylocality) in the service resolver reference for details about the configuration.
-
-
-
-
-
- ```hcl
- Kind = "service-resolver"
- Name = "api"
- PrioritizeByLocality = {
- Mode = "failover"
- }
- ```
-
-
-
-
-
-
- ```json
- {
- "kind": "service-resolver",
- "name": "api",
- "prioritizeByLocality": {
- "mode": "failover"
- }
- }
- ```
-
-
-
-
-
- ```yaml
- apiversion: consul.hashicorp.com/v1alpha1
- kind: ServiceResolver
- metadata:
- name: api
- spec:
- prioritizeByLocality:
- mode: failover
- ```
-
-
-
-
-
-1. Apply the configuration by either running the [`consul config write` CLI command](/consul/commands/config/write), applying the Kubernetes CRD, or calling the [`/config` HTTP API endpoint](/consul/api-docs/config).
-
-
-
-
- ```shell-session
- $ consul config write api-resolver.hcl
- ```
-
-
-
-
-
- ```shell-session
- $ kubectl apply -f api-resolver.yaml
- ```
-
-
-
-
- ```shell-session
- $ curl --request PUT --data @api-resolver.hcl http://127.0.0.1:8500/v1/config
- ```
-
-
-
-
-### Configure locality on Kubernetes test clusters explicitly
-
-You can explicitly configure locality for each Kubernetes node so that you can test locality-aware routing with a local Kubernetes cluster or in an environment where `topology.kubernetes.io` labels are not set.
-
-Run the `kubectl label node` command and specify the locality as arguments. The following example specifies the `us-east-1` region and `us-east-1a` zone for the node:
-
-```shell-session
-kubectl label node $K8S_NODE topology.kubernetes.io/region="us-east-1" topology.kubernetes.io/zone="us-east-1a"
-```
-
-After setting these values, subsequent service and proxy registrations in your cluster inherit the values from their local Kubernetes node.
-
-## Verify routes
-
-The routes from each downstream service instance to the nearest set of healthy upstream instances are the most immediately observable routing changes.
-
-Consul configures Envoy's built-in [`overprovisioning_factor`](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/endpoint/v3/endpoint.proto#config-endpoint-v3-clusterloadassignment) and [outlier detection](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/outlier_detection.proto#config-cluster-v3-outlierdetection) settings to enforce failover behavior. However, Envoy does not provide granular metrics specific to failover or endpoint traffic within a cluster. As a result, using external observability tools that expose network traffic within your environment is the best method for observing route changes.
-
-To verify that locality-aware routing and failover configurations, you can inspect Envoy's xDS configuration dump for a downstream proxy. Refer to the [consul-k8s CLI docs](https://developer.hashicorp.com/consul/docs/k8s/k8s-cli#proxy-read) for details on how to obtain the xDS configuration dump on Kubernetes. For other workloads, use the Envoy [admin interface](https://www.envoyproxy.io/docs/envoy/latest/operations/admin) and ensure that you [include EDS](https://www.envoyproxy.io/docs/envoy/latest/operations/admin#get--config_dump?include_eds).
-
-Inspect the [priority](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/priority#arch-overview-load-balancing-priority-levels) on each set of endpoints under the upstream `ClusterLoadAssignment` in the `EndpointsConfigDump`. Alternatively, the same priorities should be visible within the output of the [`/clusters?format=json`](https://www.envoyproxy.io/docs/envoy/latest/operations/admin#get--clusters?format=json) admin endpoint.
-
-```json
-{
- "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
- "cluster_name": "web.default.dc1.internal.161d7b5a-bb5f-379c-7d5a-1fc7504f95da.consul",
- "endpoints": [
- {
- "lb_endpoints": [
- {
- "endpoint": {
- "address": {
- "socket_address": {
- "address": "10.42.2.6",
- "port_value": 20000
- }
- },
- "health_check_config": {}
- },
- ...
- },
- ...
- ],
- "locality": {}
- },
- {
- "lb_endpoints": [
- {
- "endpoint": {
- "address": {
- "socket_address": {
- "address": "10.42.3.6",
- "port_value": 20000
- }
- },
- "health_check_config": {}
- },
- ...
- },
- ...
- ],
- "locality": {},
- "priority": 1
- },
- {
- "lb_endpoints": [
- {
- "endpoint": {
- "address": {
- "socket_address": {
- "address": "10.42.0.6",
- "port_value": 20000
- }
- },
- "health_check_config": {}
- },
- ...
- },
- ...
- ],
- "locality": {},
- "priority": 2
- }
- ],
- ...
-}
-```
-
-### Force an observable failover
-
-To force a failover for testing purposes, scale the upstream service instances in the downstream's local zone or region, if no local zone instances are available, to `0`.
-
-Note the following behaviors:
-
- - Consul prioritizes failovers in ascending order starting with `0`. The highest priority, `0`, is not explicitly visible in xDS output. This is because `0` is the default value for that field.
- - After Envoy failover configuration is in place, the specific timing of failover is determined by the downstream Envoy proxy, not Consul. Consul health status may not directly correspond to Envoy's failover behavior, which is also dependent on outlier detection.
-
-Refer to [Troubleshooting](#troubleshooting) if you do not observe the expected behavior.
-
-## Adjust load balancing and failover behavior
-
-You can adjust the global or per-service load balancing and failover behaviors by applying the property override Envoy extension. The property override extension allows you to set and remove individual properties on the Envoy resources Consul generates. Refer to [Configure Envoy proxy properties](/consul/docs/connect/proxies/envoy-extensions/usage/property-override) for additional information.
-
-1. Add the `EnvoyExtensions` configuration block to the service defaults or proxy defaults configuration entry.
-1. Configure the following settings in the `EnvoyExtensions` configuration:
- - [`overprovisioning_factor`](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/endpoint/v3/endpoint.proto#config-endpoint-v3-clusterloadassignment)
- - [outlier detection](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/outlier_detection.proto#config-cluster-v3-outlierdetection) configuration.
-1. Apply the configuration. Refer to [Apply the configuration entry](/consul/docs/connect/proxies/envoy-extensions/usage/property-override#apply-the-configuration-entry) for details.
-
-By default, Consul sets `overprovisioning_factor` to `100000`, which enforces total failover, and `max_ejection_percent` to `100`. Refer to the Envoy documentation about these fields before attempting to modify them.
-
-## Troubleshooting
-
-If you do not see the expected priorities, verify that locality is configured in the Consul agent and that `PrioritizeByLocality` is enabled in your proxy defaults or service resolver configuration entry. When `PrioritizeByLocality` is enabled but the local proxy lacks locality configuration, Consul emits a warning log to indicate that the policy could not be applied:
-
-```
-`no local service locality provided, skipping locality failover policy`
-```
diff --git a/website/content/docs/connect/native/go.mdx b/website/content/docs/connect/native/go.mdx
deleted file mode 100644
index e3068058fd1e..000000000000
--- a/website/content/docs/connect/native/go.mdx
+++ /dev/null
@@ -1,253 +0,0 @@
----
-layout: docs
-page_title: Service Mesh Native App Integration - Go Apps
-description: >-
- Consul's service mesh supports native integrations of Go applications into the service mesh through a Go library. Example code demonstrates how to connect your Go applications to the service mesh.
----
-
-# Service Mesh Native Integration for Go Applications
-
-
-
-The Connect Native golang SDK is currently deprecated and will be removed in a future Consul release.
-The SDK will be removed when the long term replacement to native application integration (such as a proxyless gRPC service mesh integration) is delivered. Refer to [GH-10339](https://github.com/hashicorp/consul/issues/10339) for additional information and to track progress toward one potential solution that is tracked as replacement functionality.
-
-
-
-We provide a library that makes it drop-in simple to integrate Consul service mesh
-with most [Go](https://golang.org/) applications. This page shows examples
-of integrating this library for accepting or establishing mesh-based
-connections. For most Go applications, Consul service mesh can be natively integrated
-in just a single line of code excluding imports and struct initialization.
-
-In addition to this, please read and understand the
-[overview of service mesh native integrations](/consul/docs/connect/native).
-In particular, after natively integrating applications with Consul service mesh,
-they must declare that they accept mesh-based connections via their service definitions.
-
-The noun _connect_ is used throughout this documentation and the Go API
-to refer to the connect subsystem that provides Consul's service mesh capabilities.
-
-## Accepting Connections
-
--> **Note:** When calling `ConnectAuthorize()` on incoming connections this library
-will return _deny_ if `Permissions` are defined on the matching intention.
-The method is currently only suited for networking layer 4 (e.g. TCP) integration.
-
-Any server that supports TLS (HTTP, gRPC, net/rpc, etc.) can begin
-accepting mesh-based connections in just a few lines of code. For most
-existing applications, converting the server to accept mesh-based
-connections will require only a one-line change excluding imports and
-structure initialization.
-
-The
-Go library exposes a `*tls.Config` that _automatically_ communicates with
-Consul to load certificates and authorize inbound connections during the
-TLS handshake. This also automatically starts goroutines to update any
-changing certs.
-
-Example, followed by more details:
-
-```go
-import(
- "net/http"
-
- "github.com/hashicorp/consul/api"
- "github.com/hashicorp/consul/connect"
-)
-
-func main() {
- // Create a Consul API client
- client, _ := api.NewClient(api.DefaultConfig())
-
- // Create an instance representing this service. "my-service" is the
- // name of _this_ service. The service should be cleaned up via Close.
- svc, _ := connect.NewService("my-service", client)
- defer svc.Close()
-
- // Creating an HTTP server that serves via service mesh
- server := &http.Server{
- Addr: ":8080",
- TLSConfig: svc.ServerTLSConfig(),
- // ... other standard fields
- }
-
- // Serve!
- server.ListenAndServeTLS("", "")
-}
-```
-
-The first step is to create a Consul API client. This is almost always the
-default configuration with an ACL token set, since you want to communicate
-to the local agent. The default configuration will also read the ACL token
-from environment variables if set. The Go library will use this client to request certificates,
-authorize connections, and more.
-
-Next, `connect.NewService` is called to create a service structure representing
-the _currently running service_. This structure maintains all the state
-for accepting and establishing connections. An application should generally
-create one service and reuse that one service for all servers and clients.
-
-Finally, a standard `*http.Server` is created. The magic line is the `TLSConfig`
-value. This is set to a TLS configuration returned by the service structure.
-This TLS configuration is configured to automatically load certificates
-in the background, cache them, and authorize inbound connections. The service
-structure automatically handles maintaining blocking queries to update certificates
-in the background if they change.
-
-Since the service returns a standard `*tls.Config`, _any_ server that supports
-TLS can be configured. This includes gRPC, net/rpc, basic TCP, and more.
-Another example is shown below with just a plain TLS listener:
-
-```go
-import(
- "crypto/tls"
-
- "github.com/hashicorp/consul/api"
- "github.com/hashicorp/consul/connect"
-)
-
-func main() {
- // Create a Consul API client
- client, _ := api.NewClient(api.DefaultConfig())
-
- // Create an instance representing this service. "my-service" is the
- // name of _this_ service. The service should be cleaned up via Close.
- svc, _ := connect.NewService("my-service", client)
- defer svc.Close()
-
- // Creating an HTTP server that serves via service mesh
- listener, _ := tls.Listen("tcp", ":8080", svc.ServerTLSConfig())
- defer listener.Close()
-
- // Accept
- go acceptLoop(listener)
-}
-```
-
-## HTTP Clients
-
-For Go applications that need to connect to HTTP-based upstream dependencies,
-the Go library can construct an `*http.Client` that automatically establishes
-mesh-based connections as long as Consul-based service discovery is used.
-
-Example, followed by more details:
-
-```go
-import(
- "github.com/hashicorp/consul/api"
- "github.com/hashicorp/consul/connect"
-)
-
-func main() {
- // Create a Consul API client
- client, _ := api.NewClient(api.DefaultConfig())
-
- // Create an instance representing this service. "my-service" is the
- // name of _this_ service. The service should be cleaned up via Close.
- svc, _ := connect.NewService("my-service", client)
- defer svc.Close()
-
- // Get an HTTP client
- httpClient := svc.HTTPClient()
-
- // Perform a request, then use the standard response
- resp, _ := httpClient.Get("https://userinfo.service.consul/user/mitchellh")
-}
-```
-
-The first step is to create a Consul API client and service. These are the
-same steps as accepting connections and are explained in detail in the
-section above. If your application is both a client and server, both the
-API client and service structure can be shared and reused.
-
-Next, we call `svc.HTTPClient()` to return a specially configured
-`*http.Client`. This client will automatically established mesh-based
-connections using Consul service discovery.
-
-Finally, we perform an HTTP `GET` request to a hypothetical userinfo service.
-The HTTP client configuration automatically sends the correct client
-certificate, verifies the server certificate, and manages background
-goroutines for updating our certificates as necessary.
-
-If the application already uses a manually constructed `*http.Client`,
-the `svc.HTTPDialTLS` function can be used to configure the
-`http.Transport.DialTLS` field to achieve equivalent behavior.
-
-### Hostname Requirements
-
-The hostname used in the request URL is used to identify the logical service
-discovery mechanism for the target. **It's not actually resolved via DNS** but
-used as a logical identifier for a Consul service discovery mechanism. It has
-the following specific limitations:
-
-- The scheme must be `https://`.
-- It must be a Consul DNS name in one of the following forms:
- - `.service[.].consul` to discover a healthy service
- instance for a given service.
- - `.query[.].consul` to discover an instance via
- [Prepared Query](/consul/api-docs/query).
-- The top-level domain _must_ be `.consul` even if your cluster has a custom
- `domain` configured for its DNS interface. This might be relaxed in the
- future.
-- Tag filters for services are not currently supported (i.e.
- `tag1.web.service.consul`) however the same behavior can be achieved using a
- prepared query.
-- External DNS names, raw IP addresses and so on will cause an error and should
- be fetched using a separate `HTTPClient`.
-
-## Raw TLS Connection
-
-For a raw `net.Conn` TLS connection, the `svc.Dial` function can be used.
-This will establish a connection to the desired service via the service mesh and
-return the `net.Conn`. This connection can then be used as desired.
-
-Example:
-
-```go
-import (
- "context"
-
- "github.com/hashicorp/consul/api"
- "github.com/hashicorp/consul/connect"
-)
-
-func main() {
- // Create a Consul API client
- client, _ := api.NewClient(api.DefaultConfig())
-
- // Create an instance representing this service. "my-service" is the
- // name of _this_ service. The service should be cleaned up via Close.
- svc, _ := connect.NewService("my-service", client)
- defer svc.Close()
-
- // Connect to the "userinfo" Consul service.
- conn, _ := svc.Dial(context.Background(), &connect.ConsulResolver{
- Client: client,
- Name: "userinfo",
- })
-}
-```
-
-This uses a familiar `Dial`-like function to establish raw `net.Conn` values.
-The second parameter to dial is an implementation of the `connect.Resolver`
-interface. The example above uses the `*connect.ConsulResolver` implementation
-to perform Consul-based service discovery. This also automatically determines
-the correct certificate metadata we expect the remote service to serve.
-
-## Static Addresses, Custom Resolvers
-
-In the raw TLS connection example, you see the use of a `connect.Resolver`
-implementation. This interface can be implemented to perform address
-resolution. This must return the address and also the URI SAN expected
-in the TLS certificate served by the remote service.
-
-The Go library provides two built-in resolvers:
-
-- `*connect.StaticResolver` can be used for static addresses where no
- service discovery is required. The expected cert URI SAN must be
- manually specified.
-
-- `*connect.ConsulResolver` which resolves services and prepared queries
- via the Consul API. This also automatically determines the expected
- cert URI SAN.
diff --git a/website/content/docs/connect/native/index.mdx b/website/content/docs/connect/native/index.mdx
deleted file mode 100644
index 3cf64f346c2e..000000000000
--- a/website/content/docs/connect/native/index.mdx
+++ /dev/null
@@ -1,165 +0,0 @@
----
-layout: docs
-page_title: Service Mesh Native App Integration - Overview
-description: >-
- When using sidecar proxies is not possible, applications can natively integrate with Consul service mesh, but have reduced access to service mesh features. Learn how "mesh-native" or "connect-native" apps use mTLS to authenticate with Consul and how to add integrations to service registrations.
----
-
-# Service Mesh Native App Integration Overview
-
-
-
-The Connect Native Golang SDK and `v1/agent/connect/authorize`, `v1/agent/connect/ca/leaf`,
-and `v1/agent/connect/ca/roots` APIs are deprecated and will be removed in a future release. Although Connect Native
-will still operate as designed, we do not recommend leveraging this feature because it is deprecated and will be
-removed when the long term replacement to native application integration (such as a proxyless gRPC service mesh integration) is delivered. Refer to [GH-10339](https://github.com/hashicorp/consul/issues/10339) for additional information and to track progress toward one potential solution that is tracked as replacement functionality.
-
-The Native App Integration does not support many of the Consul's service mesh features, and is not under active development.
-The [Envoy proxy](/consul/docs/connect/proxies/envoy) should be used for most production environments.
-
-
-
-Applications can natively integrate with Consul's service mesh API to support accepting
-and establishing connections to other mesh services without the overhead of a
-[proxy sidecar](/consul/docs/connect/proxies). This option is especially useful
-for applications that may be experiencing performance issues with the proxy
-sidecar deployment. This page will cover the high-level overview of
-integration, registering the service, etc. For language-specific examples, see
-the sidebar navigation to the left. It is also required if your service
-relies on a dynamic set of upstream services.
-
-Service mesh traffic is just basic mutual TLS. This means that almost any application
-can easily integrate with Consul service mesh. There is no custom protocol in use;
-any language that supports TLS can accept and establish mesh-based
-connections.
-
-We currently provide an easy-to-use [Go integration](/consul/docs/connect/native/go)
-to assist with getting the proper certificates, verifying connections,
-etc. We plan to add helper libraries for other languages in the future.
-However, without library support, it is still possible for any major language
-to integrate with Consul service mesh.
-
-The noun _connect_ is used throughout this documentation to refer to the connect
-subsystem that provides Consul's service mesh capabilities.
-
-## Overview
-
-The primary work involved in natively integrating with service mesh is
-[acquiring the proper TLS certificate](/consul/api-docs/agent/connect#service-leaf-certificate),
-[verifying TLS certificates](/consul/api-docs/agent/connect#certificate-authority-ca-roots),
-and [authorizing inbound connections or requests](/consul/api-docs/connect/intentions#list-matching-intentions).
-
-All of this is done using the Consul HTTP APIs linked above.
-
-An overview of the sequence is shown below. The diagram and the following
-details may seem complex, but this is a _regular mutual TLS connection_ with
-an API call to verify the incoming client certificate.
-
-
-
--> **Note:** This diagram depicts the simpler networking layer 4 (e.g. TCP) [integration
-mechanism](/consul/api-docs/agent/connect#authorize).
-
-Details on the steps are below:
-
-- **Service discovery** - This is normal service discovery using Consul,
- a static IP, or any other mechanism. If you're using Consul DNS, use the
- [`.connect`](/consul/docs/services/discovery/dns-static-lookups#service-mesh-enabled-service-lookups)
- syntax to find mesh-capable endpoints for a service. After service
- discovery, choose one address from the list of **service addresses**.
-
-- **Mutual TLS** - As a client, connect to the discovered service address
- over normal TLS. As part of the TLS connection, provide the
- [service certificate](/consul/api-docs/agent/connect#service-leaf-certificate)
- as the client certificate. Verify the remote certificate against the
- [public CA roots](/consul/api-docs/agent/connect#certificate-authority-ca-roots).
- As a client, if the connection is established then you've established
- a mesh-based connection and there are no further steps!
-
-- **Authorization** - As a server accepting connections, verify the client
- certificate against the [public CA
- roots](/consul/api-docs/agent/connect#certificate-authority-ca-roots). After verifying
- the certificate, parse some basic fields from it and use those to determine
- if the connection should be allowed. How this is done is dependent on
- the level of integration desired:
-
- - **Simple integration (TCP-only)** - Call the [authorizing
- API](/consul/api-docs/agent/connect#authorize) against the local agent. If this returns
- successfully, complete the TLS handshake and establish the connection. If
- authorization fails, close the connection.
-
- -> **NOTE:** This API call is expected to be called in the connection path,
- so if the local Consul agent is down or unresponsive it will affect the
- success rate of new connections. The agent uses locally cached data to
- authorize the connection and typically responds in microseconds. Therefore,
- the impact to the TLS handshake is typically microseconds.
-
- - **Complete integration** - Like how the calls to acquire the leaf
- certificate and CA roots are expected to be done out of band and reused, so
- should the [intention match
- API](/consul/api-docs/connect/intentions#list-matching-intentions). With all of the
- relevant intentions cached for the destination, all enforcement operations
- can be done entirely by the service without calling any Consul APIs in the
- connection or request path. If the service is networking layer 7 (e.g.
- HTTP) aware it can safely enforce intentions per _request_ instead of the
- coarser per _connection_ model.
-
-## Update certificates and certificate roots
-
-The leaf certificate and CA roots can be updated at any time and the
-natively integrated application must react to this relatively quickly
-so that new connections are not disrupted. This can be done through
-Consul blocking queries (HTTP long polling) or through periodic polling.
-
-The API calls for
-[acquiring a service mesh TLS certificate](/consul/api-docs/agent/connect#service-leaf-certificate)
-and [reading service mesh CA roots](/consul/api-docs/agent/connect#certificate-authority-ca-roots)
-both support
-[blocking queries](/consul/api-docs/features/blocking). By using blocking
-queries, an application can efficiently wait for an updated value. For example,
-the leaf certificate API will block until the certificate is near expiration
-or the signing certificates have changed and will issue and return a new
-certificate.
-
-In some languages, using blocking queries may not be simple. In that case,
-we still recommend using the blocking query parameters but with a very short
-`timeout` value set. Doing this is documented with
-[blocking queries](/consul/api-docs/features/blocking). The low timeout will
-ensure the API responds quickly. We recommend that applications poll the
-certificate endpoints frequently, such as multiple times per minute.
-
-The overhead for the blocking queries (long or periodic polling) is minimal.
-The API calls are to the local agent and the local agent uses locally
-cached data multiplexed over a single TCP connection to the Consul leader.
-Even if a single machine has 1,000 mesh-enabled services all blocking
-on certificate updates, this translates to only one TCP connection to the
-Consul server.
-
-Some language libraries such as the
-[Go library](/consul/docs/connect/native/go) automatically handle updating
-and locally caching the certificates.
-
-## Service registration
-
-Mesh-native applications must tell Consul that they support service mesh
-natively. This enables the service to be returned as part of service
-discovery for service mesh-capable services used by other mesh-native applications
-and client [proxies](/consul/docs/connect/proxies).
-
-You can enable native service mesh support directly in the [service definition](/consul/docs/services/configuration/services-configuration-reference#connect) by configuring the `connect` block. In the following example, the `redis` service is configured to support service mesh natively:
-
-```json
-{
- "service": {
- "name": "redis",
- "port": 8000,
- "connect": {
- "native": true
- }
- }
-}
-```
-
-Services that support service mesh natively are still returned through the standard
-service discovery mechanisms in addition to the mesh-only service discovery
-mechanisms.
diff --git a/website/content/docs/connect/nomad.mdx b/website/content/docs/connect/nomad.mdx
index c65f07bc9162..48b14f0d2c43 100644
--- a/website/content/docs/connect/nomad.mdx
+++ b/website/content/docs/connect/nomad.mdx
@@ -1,11 +1,11 @@
---
layout: docs
-page_title: Service Mesh - Nomad Integration
+page_title: Connect Nomad services with Consul
description: >-
Consul's service mesh can be applied to provide secure communication between services managed by Nomad's scheduler and orchestrator functions, including Nomad jobs and task groups. Use the guide and reference documentation to learn more.
---
-# Consul and Nomad Integration
+# Connect Nomad services with Consul
Consul service mesh can be used with [Nomad](https://www.nomadproject.io/) to provide
secure service-to-service communication between Nomad jobs and task groups.
@@ -26,4 +26,4 @@ For reference information about configuring Nomad jobs to use Consul service mes
- [Nomad Job Specification - `sidecar_service`](/nomad/docs/job-specification/sidecar_service)
- [Nomad Job Specification - `sidecar_task`](/nomad/docs/job-specification/sidecar_task)
- [Nomad Job Specification - `proxy`](/nomad/docs/job-specification/proxy)
-- [Nomad Job Specification - `upstreams`](/nomad/docs/job-specification/upstreams)
+- [Nomad Job Specification - `upstreams`](/nomad/docs/job-specification/upstreams)
\ No newline at end of file
diff --git a/website/content/docs/connect/observability/access-logs.mdx b/website/content/docs/connect/observability/access-logs.mdx
deleted file mode 100644
index 377b32b517c2..000000000000
--- a/website/content/docs/connect/observability/access-logs.mdx
+++ /dev/null
@@ -1,253 +0,0 @@
----
-layout: docs
-page_title: Service Mesh Observability - Access Logs
-description: >-
- Consul can emit access logs for application connections and requests that pass through Envoy proxies in the service mesh. Learn how to configure access logs, including minimum configuration requirements and the default log format.
----
-
-# Access Logs
-
-This topic describes configuration and usage for access logs. Consul can emit access logs to record application connections and requests that pass through proxies in a service mesh, including sidecar proxies and gateways.
-You can use the application traffic records in access logs to help you perform the following operations:
-
- - **Diagnosing and Troubleshooting Issues**: Operators and application owners can identify configuration issues in the service mesh or the application by analyzing failed connections and requests.
- - **Threat Detection**: Operators can review details about unauthorized attempts to access the service mesh and their origins.
- - **Audit Compliance**: Operators can use access logs for security compliance requirements for traffic entering and exiting the service mesh through gateways.
-
-Consul supports access logs capture through Envoy proxies started through the [`consul connect envoy`](/consul/commands/connect/envoy) CLI command and [`consul-dataplane`](/consul/docs/connect/dataplane). Other proxies are not supported.
-
-## Enable access logs
-
-Access logs configurations are defined globally in the [`proxy-defaults`](/consul/docs/connect/config-entries/proxy-defaults#accesslogs) configuration entry.
-
-The following example is a minimal configuration for enabling access logs:
-
-
-
-```hcl
-Kind = "proxy-defaults"
-Name = "global"
-AccessLogs {
- Enabled = true
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: ProxyDefaults
-metadata:
- name: global
-spec:
- accessLogs:
- enabled: true
-```
-
-```json
-{
- "Kind": "proxy-defaults",
- "Name": "global",
- "AccessLogs": {
- "Enabled": true
- }
-}
-```
-
-
-
-All proxies, including sidecars and gateways, emit access logs when the behavior is enabled.
-Both inbound and outbound traffic through the proxy are logged, including requests made directly to [Envoy's administration interface](https://www.envoyproxy.io/docs/envoy/latest/operations/admin.html?highlight=administration%20logs#administration-interface).
-
-If you enable access logs after the Envoy proxy was started, access logs for the administration interface are not captured until you restart the proxy.
-
-## Default log format
-
-Access logs use the following format when no additional customization is provided:
-
-~> **Security warning:** The following log format contains IP addresses which may be a data compliance issue, depending on your regulatory environment.
-Operators should carefully inspect their chosen access log format to prevent leaking sensitive or personally identifiable information.
-
-```json
-{
- "start_time": "%START_TIME%",
- "route_name": "%ROUTE_NAME%",
- "method": "%REQ(:METHOD)%",
- "path": "%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)%",
- "protocol": "%PROTOCOL%",
- "response_code": "%RESPONSE_CODE%",
- "response_flags": "%RESPONSE_FLAGS%",
- "response_code_details": "%RESPONSE_CODE_DETAILS%",
- "connection_termination_details": "%CONNECTION_TERMINATION_DETAILS%",
- "bytes_received": "%BYTES_RECEIVED%",
- "bytes_sent": "%BYTES_SENT%",
- "duration": "%DURATION%",
- "upstream_service_time": "%RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)%",
- "x_forwarded_for": "%REQ(X-FORWARDED-FOR)%",
- "user_agent": "%REQ(USER-AGENT)%",
- "request_id": "%REQ(X-REQUEST-ID)%",
- "authority": "%REQ(:AUTHORITY)%",
- "upstream_host": "%UPSTREAM_HOST%",
- "upstream_cluster": "%UPSTREAM_CLUSTER%",
- "upstream_local_address": "%UPSTREAM_LOCAL_ADDRESS%",
- "downstream_local_address": "%DOWNSTREAM_LOCAL_ADDRESS%",
- "downstream_remote_address": "%DOWNSTREAM_REMOTE_ADDRESS%",
- "requested_server_name": "%REQUESTED_SERVER_NAME%",
- "upstream_transport_failure_reason": "%UPSTREAM_TRANSPORT_FAILURE_REASON%"
-}
-```
-
-Depending on the connection type, such as TCP or HTTP, some of these fields may be empty.
-
-## Custom log format
-
-Envoy uses [command operators](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#command-operators) to expose information about application traffic.
-You can use these fields to customize the access logs that proxies emit.
-
-Custom logs can be either JSON format or text format.
-
-### JSON format
-
-You can format access logs in JSON so that you can parse them with Application Monitoring Platforms (APMs).
-
-To use a custom access log, in the `proxy-defaults` configuration entry, set [`JSONFormat`](/consul/docs/connect/config-entries/proxy-defaults#jsonformat) to the string representation of the desired JSON.
-
-Nesting is supported.
-
-
-
-```hcl
-Kind = "proxy-defaults"
-Name = "global"
-AccessLogs {
- Enabled = true
- JSONFormat = <
-
-### Text format
-
-To use a custom access log formatted in plaintext, in the `proxy-defaults` configuration entry, set [`TextFormat`](/consul/docs/connect/config-entries/proxy-defaults#textformat) to the desired customized string.
-
-New lines are automatically added to the end of the log to keep each access log on its own line in the output.
-
-
-
-```hcl
-Kind = "proxy-defaults"
-Name = "global"
-AccessLogs {
- Enabled = true
- TextFormat = "MY START TIME: %START_TIME%, THIS CONNECTIONS PROTOCOL IS %PROTOCOL%"
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: ProxyDefaults
-metadata:
- name: global
-spec:
- accessLogs:
- enabled: true
- textFormat: "MY START TIME: %START_TIME%, THIS CONNECTIONS PROTOCOL IS %PROTOCOL%"
-```
-
-```json
-{
- "Kind": "proxy-defaults",
- "Name": "global",
- "AccessLogs": {
- "Enabled": true,
-    "TextFormat": "MY START TIME: %START_TIME%, THIS CONNECTIONS PROTOCOL IS %PROTOCOL%"
- }
-}
-```
-
-
-
-
-## Kubernetes
-
-As part of its normal operation, the Envoy debugging logs for the `consul-dataplane`, `envoy`, or `envoy-sidecar` containers are written to `stderr`.
-The access log [`Type`](/consul/docs/connect/config-entries/proxy-defaults#type) is set to `stdout` by default for access logs when enabled.
-Use a log aggregating solution to separate the machine-readable access logs from the Envoy process debug logs.
-
-## Write to a file
-
-You can configure Consul to write access logs to a file on the host where Envoy runs.
-
-Envoy does not rotate log files. A log rotation solution, such as [logrotate](https://www.redhat.com/sysadmin/setting-logrotate), can prevent access logs from consuming too much of the host's disk space when writing to a file.
-
-
-
-```hcl
-Kind = "proxy-defaults"
-Name = "global"
-AccessLogs {
- Enabled = true
- Type = "file"
- Path = "/var/log/envoy/access-logs.txt"
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: ProxyDefaults
-metadata:
- name: global
-spec:
- accessLogs:
- enabled: true
- type: file
- path: "/var/log/envoy/access-logs.txt"
-```
-
-```json
-{
- "Kind": "proxy-defaults",
- "Name": "global",
- "AccessLogs": {
- "Enabled": true,
- "Type": "file",
- "Path": "/var/log/envoy/access-logs.txt"
- }
-}
-```
-
-
diff --git a/website/content/docs/connect/observability/grafanadashboards/index.mdx b/website/content/docs/connect/observability/grafanadashboards/index.mdx
deleted file mode 100644
index 2a21ec6f2fcf..000000000000
--- a/website/content/docs/connect/observability/grafanadashboards/index.mdx
+++ /dev/null
@@ -1,91 +0,0 @@
----
-layout: docs
-page_title: Service Mesh Observability - Dashboards
-description: >-
- This documentation provides an overview of several dashboards designed for monitoring and managing services within a Consul-managed Envoy service mesh. Learn how to enable access logs and configure key performance and operational metrics to ensure the reliability and performance of services in the service mesh.
----
-
-# Dashboards for service mesh observability
-
-This topic describes the configuration and usage of dashboards for monitoring and managing services within a Consul-managed Envoy service mesh. These dashboards provide critical insights into the health, performance, and resource utilization of services. The dashboards described here are essential tools for ensuring the stability, efficiency, and reliability of your service mesh environment.
-
-This page provides reference information about the Grafana dashboard configurations included in the [`grafana` directory in the `hashicorp/consul` GitHub repository](https://github.com/hashicorp/consul/tree/main/grafana).
-
-## Dashboards overview
-
-The repository includes the following dashboards:
-
- - **Consul service-to-service dashboard**: Provides a detailed view of service-to-service communications, monitoring key metrics like access logs, HTTP requests, error counts, response code distributions, and request success rates. The dashboard includes customizable filters for focusing on specific services and namespaces.
-
- - **Consul service dashboard**: Tracks key metrics for Envoy proxies at the cluster and service levels, ensuring the performance and reliability of individual services within the mesh.
-
- - **Consul dataplane dashboard**: Offers a comprehensive overview of service health and performance, including request success rates, resource utilization (CPU and memory), active connections, and cluster health. It helps operators maintain service reliability and optimize resource usage.
-
- - **Consul k8s dashboard**: Focuses on monitoring the health and resource usage of the Consul control plane within a Kubernetes environment, ensuring the stability of the control plane.
-
- - **Consul server dashboard**: Provides detailed monitoring of Consul servers, tracking key metrics like server health, CPU and memory usage, disk I/O, and network performance. This dashboard is critical for ensuring the stability and performance of Consul servers within the service mesh.
-
-## Enabling Prometheus
-
-Add the following configurations to your Consul Helm chart to enable the Prometheus tools.
-
-
-
-```yaml
-global:
- metrics:
- enabled: true
- provider: "prometheus"
- enableAgentMetrics: true
- agentMetricsRetentionTime: "10m"
-
-prometheus:
- enabled: true
-
-ui:
- enabled: true
- metrics:
- enabled: true
- provider: "prometheus"
- baseURL: http://prometheus-server.consul
-```
-
-
-
-## Enable access logs
-
-Access logs configurations are defined globally in the [`proxy-defaults`](/consul/docs/connect/config-entries/proxy-defaults#accesslogs) configuration entry.
-
-The following example is a minimal configuration for enabling access logs:
-
-
-
-```hcl
-Kind = "proxy-defaults"
-Name = "global"
-AccessLogs {
- Enabled = true
-}
-```
-
-```yaml
-apiVersion: consul.hashicorp.com/v1alpha1
-kind: ProxyDefaults
-metadata:
- name: global
-spec:
- accessLogs:
- enabled: true
-```
-
-```json
-{
- "Kind": "proxy-defaults",
- "Name": "global",
- "AccessLogs": {
- "Enabled": true
- }
-}
-```
-
-
diff --git a/website/content/docs/connect/observability/index.mdx b/website/content/docs/connect/observability/index.mdx
deleted file mode 100644
index 95dce806a0ab..000000000000
--- a/website/content/docs/connect/observability/index.mdx
+++ /dev/null
@@ -1,58 +0,0 @@
----
-layout: docs
-page_title: Service Mesh Observability - Overview
-description: >-
- To use Consul's observability features, configure sidecar proxies in the service mesh to collect and emit L7 metrics. Learn about configuring metrics destinations and a service's protocol and upstreams.
----
-
-# Service Mesh Observability Overview
-
-In order to take advantage of the service mesh's L7 observability features you will need
-to:
-
-- Deploy sidecar proxies that are capable of emitting metrics with each of your
- services. We have first class support for Envoy.
-- Define where your proxies should send metrics that they collect.
-- Define the protocols for each of your services.
-- Define the upstreams for each of your services.
-
-If you are using Envoy as your sidecar proxy, you will need to [enable
-gRPC](/consul/docs/agent/config/config-files#grpc_port) on your client agents. To define the
-metrics destination and service protocol you may want to enable [configuration
-entries](/consul/docs/agent/config/config-files#config_entries) and [centralized service
-configuration](/consul/docs/agent/config/config-files#enable_central_service_config).
-
-### Kubernetes
-If you are using Kubernetes, the Helm chart can simplify much of the configuration needed to enable observability. See
-our [Kubernetes observability docs](/consul/docs/k8s/connect/observability/metrics) for more information.
-
-### Metrics destination
-
-For Envoy the metrics destination can be configured in the proxy configuration
-entry's `config` section.
-
-```
-kind = "proxy-defaults"
-name = "global"
-config {
- "envoy_dogstatsd_url": "udp://127.0.0.1:9125"
-}
-```
-
-Find other possible metrics syncs in the [Envoy documentation](/consul/docs/connect/proxies/envoy#bootstrap-configuration).
-
-### Service protocol
-
-You can specify the [`protocol`](/consul/docs/connect/config-entries/service-defaults#protocol)
-for all service instances in the `service-defaults` configuration entry. You can also override the default protocol when defining and registering proxies in a service definition file. Refer to [Expose Paths Configuration Reference](/consul/docs/connect/proxies/proxy-config-reference#expose-paths-configuration-reference) for additional information.
-
-By default, proxies only provide L4 metrics.
-Defining the protocol allows proxies to handle requests at the L7
-protocol and emit L7 metrics. It also allows proxies to make per-request
-load balancing and routing decisions.
-
-### Service upstreams
-
-You can set the upstream for each service using the proxy's
-[`upstreams`](/consul/docs/connect/proxies/proxy-config-reference#upstreams)
-sidecar parameter, which can be defined in a service's [sidecar registration](/consul/docs/connect/proxies/deploy-sidecar-services).
diff --git a/website/content/docs/connect/observability/service.mdx b/website/content/docs/connect/observability/service.mdx
deleted file mode 100644
index 55b4580aff7a..000000000000
--- a/website/content/docs/connect/observability/service.mdx
+++ /dev/null
@@ -1,212 +0,0 @@
----
-layout: docs
-page_title: Monitoring service-to-service communication with Envoy
-description: >-
- Learn to monitor the appropriate metrics when using Envoy proxy.
----
-
-# Monitoring service-to-service communication with Envoy
-
-When running a service mesh with Envoy as the proxy, there are a wide array of possible metrics produced from traffic flowing through the data plane. This document covers a set of scenarios and key baseline metrics and potential alerts that will help you maintain the overall health and resilience of the mesh for HTTP services. In addition, it provides examples of using these metrics in specific ways to generate a Grafana dashboard using a Prometheus backend to better understand how the metrics behave.
-
-When collecting metrics, it is important to establish a baseline. This baseline ensures your Consul deployment is healthy, and serves as a reference point when troubleshooting abnormal Cluster behavior. Once you have established a baseline for your metrics, use them and the following recommendations to configure reasonable alerts for your Consul agent.
-
-
-
- The following examples assume that the operator adds the cluster name (i.e. datacenter) using the label “cluster” and the node name (i.e. machine or pod) using the label “node” to all scrape targets.
-
-
-
-## General scenarios
-
-### Is Envoy's configuration growing stale?
-
-When Envoy connects to the Consul control plane over xDS, it will rapidly converge to the current configuration that the control plane expects it to have. If the xDS stream terminates and does not reconnect for an extended period, then the xDS configuration currently in the Envoy instances will “fail static” and slowly grow out of date.
-
-##### Metric
-
-`envoy_control_plane_connected_state`
-
-#### Alerting
-
-If the value for a given node/pod/machine was 0 for an extended period of time.
-
-#### Example dashboard (table)
-
-```
-group(last_over_time(envoy_control_plane_connected_state{cluster="$cluster"}[1m] ) == 0) by (node)
-```
-
-## Inbound traffic scenarios
-
-### Is this service being sent requests?
-
-Within a mesh, a request travels from one service to another. You may choose to measure many relevant metrics from the calling-side, the serving-side, or both.
-
-It is useful to track the perceived request rate of requests from the calling-side as that would include all requests, even those that fail to arrive at the serving-side due to any failures.
-
-Any measurement of the request rate is also generally useful for capacity planning purposes as increased traffic typically correlates with a need for a scale-up event in the near future.
-
-##### Metric
-
-`envoy_cluster_upstream_rq_total`
-
-#### Alerting
-
-If the value has a significant change, check if services are properly interacting with each other and if you need to increase your Consul agent resource requirements.
-
-#### Example dashboard (plot; rate)
-
-```
-sum(irate(envoy_cluster_upstream_rq_total{consul_destination_datacenter="$cluster",
-consul_destination_service="$service"}[1m])) by (cluster, local_cluster)
-```
-
-### Are requests sent to this service mostly successful?
-
-A service mesh is about communication between services, so it is important to track the perceived success rate of requests witnessed by the calling services.
-
-##### Metric
-
-`envoy_cluster_upstream_rq_xx`
-
-#### Alerting
-
-If the value crosses a user defined baseline.
-
-#### Example dashboard (plot; %)
-
-```
-sum(irate(envoy_cluster_upstream_rq_xx{envoy_response_code_class!="5",consul_destination_datacenter="$cluster",consul_destination_service="$service"}[1m])) by (cluster, local_cluster) / sum(irate(envoy_cluster_upstream_rq_xx{consul_destination_datacenter="$cluster",consul_destination_service="$service"}[1m])) by (cluster, local_cluster)
-```
-
-### Are requests sent to this service handled in a timely manner?
-
-If you undersize your infrastructure from a resource perspective, then you may expect a decline in response speed over time. You can track this by plotting the 95th percentile of the latency as experienced by the clients.
-
-##### Metric
-
-`envoy_cluster_upstream_rq_time_bucket`
-
-#### Alerting
-
-If the value crosses a user defined baseline.
-
-#### Example dashboard (plot; value)
-
-```
-histogram_quantile(0.95, sum(rate(envoy_cluster_upstream_rq_time_bucket{consul_destination_datacenter="$cluster",consul_destination_service="$service",local_cluster!=""}[1m])) by (le, cluster, local_cluster))
-```
-
-### Is this service responding to requests that it receives?
-
-Unlike the perceived request rate, which is measured from the calling side, this is the real request rate measured on the serving-side. This is a serving-side parallel metric that can help clarify underlying causes of problems in the calling-side equivalent metric. Ideally this metric should roughly track the calling side values in a 1-1 manner.
-
-##### Metric
-
-`envoy_http_downstream_rq_total`
-
-#### Alerting
-
-If the value crosses a user defined baseline.
-
-#### Example dashboard (plot; rate)
-
-```
-sum(irate(envoy_http_downstream_rq_total{cluster="$cluster",local_cluster="$service",envoy_http_conn_manager_prefix="public_listener"}[1m]))
-```
-
-### Are responses from this service mostly successful?
-
-Unlike the perceived success rate of requests, which is measured from the calling side, this is the real success rate of requests measured on the serving-side. This is a serving-side parallel metric that can help clarify underlying causes of problems in the calling-side equivalent metric. Ideally this metric should roughly track the calling side values in a 1-1 manner.
-
-##### Metrics
-
-`envoy_http_downstream_rq_total`
-
-`envoy_http_downstream_rq_xx`
-
-#### Alerting
-
-If the value crosses a user defined baseline.
-
-#### Example dashboard (plot; %)
-
-##### Total
-
-```
-sum(increase(envoy_http_downstream_rq_total{cluster="$cluster",local_cluster="$service",envoy_http_conn_manager_prefix="public_listener"}[1m]))
-```
-
-##### BY STATUS CODE:
-
-```
-sum(increase(envoy_http_downstream_rq_xx{cluster="$cluster",local_cluster="$service",envoy_http_conn_manager_prefix="public_listener"}[1m])) by (envoy_response_code_class)
-```
-
-## Outbound traffic scenarios
-
-### Is this service sending traffic to its upstreams?
-
-Similar to the real request rate for requests arriving at a service, it may be helpful to view the perceived request rate departing from a service through its upstreams.
-
-##### Metric
-
-`envoy_cluster_upstream_rq_total`
-
-#### Alerting
-
-If the value crosses a user defined success threshold.
-
-#### Example dashboard (plot; rate)
-
-```
-sum(irate(envoy_cluster_upstream_rq_total{cluster="$cluster",
-local_cluster="$service",
-consul_destination_target!=""}[1m])) by (consul_destination_target)
-```
-
-### Are requests from this service to its upstreams mostly successful?
-
-Similar to the real success rate of requests arriving at a service, it is also important to track the perceived success rate of requests departing from a service through its upstreams.
-
-##### Metric
-
-`envoy_cluster_upstream_rq_xx`
-
-#### Alerting
-
-If the value crosses a user defined success threshold.
-
-#### Example dashboard (plot; value)
-
-```
-sum(irate(envoy_cluster_upstream_rq_xx{envoy_response_code_class!="5",
-cluster="$cluster",local_cluster="$service",
-consul_destination_target!=""}[1m])) by (consul_destination_target) / sum(irate(envoy_cluster_upstream_rq_xx{cluster="$cluster",local_cluster="$service",consul_destination_target!=""}[1m])) by (consul_destination_target)
-```
-
-### Are requests from this service to its upstreams handled in a timely manner?
-
-Similar to the latency of requests departing for a service, it is useful to track the 95th percentile of the latency of requests departing from a service through its upstreams.
-
-##### Metric
-
-`envoy_cluster_upstream_rq_time_bucket`
-
-#### Alerting
-
-If the value crosses a user defined success threshold.
-
-#### Example dashboard (plot; value)
-
-```
-histogram_quantile(0.95, sum(rate(envoy_cluster_upstream_rq_time_bucket{cluster="$cluster",
-local_cluster="$service",consul_target!=""}[1m])) by (le, consul_destination_target))
-```
-
-## Next steps
-
-In this guide, you learned recommendations for monitoring your Envoy metrics, and why monitoring these metrics is important for your Consul deployment.
-
-To learn about monitoring Consul components, visit our [Monitoring Consul components](/well-architected-framework/reliability/reliability-consul-monitoring-consul-components) documentation.
diff --git a/website/content/docs/connect/observability/ui-visualization.mdx b/website/content/docs/connect/observability/ui-visualization.mdx
deleted file mode 100644
index 2cbf0b0ae26b..000000000000
--- a/website/content/docs/connect/observability/ui-visualization.mdx
+++ /dev/null
@@ -1,726 +0,0 @@
----
-layout: docs
-page_title: Service Mesh Observability - UI Visualization
-description: >-
- Consul's UI can display a service's topology and associated metrics from the service mesh. Learn how to configure the UI to collect metrics from your metrics provider, modify access for metrics proxies, and integrate custom metrics providers.
----
-
-# Service Mesh Observability: UI Visualization
-
--> Coming here from "Configure metrics dashboard" or "Configure dashboard"? See [Configuring Dashboard URLs](#configuring-dashboard-urls).
-
-Since Consul 1.9.0, Consul's built in UI includes a topology visualization to
-show a service's immediate connectivity at a glance. It is not intended as a
-replacement for dedicated monitoring solutions, but rather as a quick overview
-of the state of a service and its connections within the Service Mesh.
-
-The topology visualization requires services to be using [service mesh](/consul/docs/connect) via [sidecar proxies](/consul/docs/connect/proxies).
-
-The visualization may optionally be configured to include a link to an external
-per-service dashboard. This is designed to provide convenient deep links to your
-existing monitoring or Application Performance Monitoring (APM) solution for
-each service. More information can be found in [Configuring Dashboard
-URLs](#configuring-dashboard-urls).
-
-It is possible to configure the UI to fetch basic metrics from your metrics
-provider storage to augment the visualization as displayed below.
-
-
-
-Consul has built-in support for overlaying metrics from a
-[Prometheus](https://prometheus.io) backend. Alternative metrics providers may
-be supported using a new and experimental JavaScript API. See [Custom Metrics
-Providers](#custom-metrics-providers).
-
-## Kubernetes
-
-If running Consul in Kubernetes, the Helm chart can automatically configure Consul's UI to display topology
-visualizations. See our [Kubernetes observability docs](/consul/docs/k8s/connect/observability/metrics) for more information.
-
-## Configuring the UI To Display Metrics
-
-To configure Consul's UI to fetch metrics there are two required configuration settings.
-These need to be set on each Consul Agent that is responsible for serving the
-UI. If there are multiple clients with the UI enabled in a datacenter for
-redundancy these configurations must be added to all of them.
-
-We assume that the UI is already enabled by setting
-[`ui_config.enabled`](/consul/docs/agent/config/config-files#ui_config_enabled) to `true` in the
-agent's configuration file.
-
-To use the built-in Prometheus provider
-[`ui_config.metrics_provider`](/consul/docs/agent/config/config-files#ui_config_metrics_provider)
-must be set to `prometheus`.
-
-The UI must query the metrics provider through a proxy endpoint. This simplifies
-deployment where Prometheus is not exposed externally to UI users' browsers.
-
-To set this up, provide the URL that the _Consul agent_ should use to reach the
-Prometheus server in
-[`ui_config.metrics_proxy.base_url`](/consul/docs/agent/config/config-files#ui_config_metrics_proxy_base_url).
-For example in Kubernetes, the Prometheus helm chart by default installs a
-service named `prometheus-server` so each Consul agent can reach it on
-`http://prometheus-server` (using Kubernetes' DNS resolution).
-
-A full configuration to enable Prometheus is given below.
-
-
-
-
-
-```hcl
-ui_config {
- enabled = true
- metrics_provider = "prometheus"
- metrics_proxy {
- base_url = "http://prometheus-server"
- }
-}
-```
-
-
-
-
-
-```yaml
-ui:
- enabled: true
- metrics:
- enabled: true # by default, this inherits from the value global.metrics.enabled
- provider: "prometheus"
- baseURL: http://prometheus-server
-```
-
-
-
-
-
-```json
-{
- "ui_config": {
- "enabled": true,
- "metrics_provider": "prometheus",
- "metrics_proxy": {
- "base_url": "http://prometheus-server"
- }
- }
-}
-```
-
-
-
-
-
--> **Note**: For more information on configuring the observability UI on Kubernetes, use this [reference](/consul/docs/k8s/connect/observability/metrics).
-
-## Configuring Dashboard URLs
-
-Since Consul's visualization is intended as an overview of your mesh and not a
-comprehensive monitoring tool, you can configure a service dashboard URL
-template which allows users to click directly through to the relevant
-service-specific dashboard in an external tool like
-[Grafana](https://grafana.com) or a hosted provider.
-
-To configure this, you must provide a URL template in the [agent configuration
-file](/consul/docs/agent/config/config-files#ui_config_dashboard_url_templates) for all agents that
-have the UI enabled. The template is essentially the URL to the external
-dashboard, but can have placeholder values which will be replaced with the
-service name, namespace and datacenter where appropriate to allow deep-linking
-to the relevant information.
-
-An example with Grafana is shown below.
-
-
-
-
-
-```hcl
-ui_config {
- enabled = true
- dashboard_url_templates {
- service = "https://grafana.example.com/d/lDlaj-NGz/service-overview?orgId=1&var-service={{Service.Name}}&var-namespace={{Service.Namespace}}&var-partition={{Service.Partition}}&var-dc={{Datacenter}}"
- }
-}
-```
-
-
-
-
-
-```yaml
-# The UI is enabled by default so this stanza is not required.
-ui:
- enabled: true
- # This configuration requires version 0.40.0 or later of the Helm chart.
- dashboardURLTemplates:
- service: "https://grafana.example.com/d/lDlaj-NGz/service-overview?orgId=1&var-service={{Service.Name}}&var-namespace={{Service.Namespace}}&var-dc={{Datacenter}}"
-
-# If you are using a version of the Helm chart older than 0.40.0, you must
-# configure the dashboard URL template using the `server.extraConfig` parameter
-# in the Helm chart's values file.
-server:
- extraConfig: |
- {
- "ui_config": {
- "dashboard_url_templates": {
- "service": "https://grafana.example.com/d/lDlaj-NGz/service-overview?orgId=1&var-service={{ "{{" }}Service.Name}}&var-namespace={{ "{{" }}Service.Namespace}}&var-dc={{ "{{" }}Datacenter}}"
- }
- }
- }
-```
-
-
-
-
-
-```json
-{
- "ui_config": {
- "enabled": true,
- "dashboard_url_templates": {
- "service": "https://grafana.example.com/d/lDlaj-NGz/service-overview?orgId=1\u0026var-service={{Service.Name}}\u0026var-namespace={{Service.Namespace}}\u0026var-partition={{Service.Partition}}\u0026var-dc={{Datacenter}}"
- }
- }
-}
-```
-
-
-
-
-
-~> **Note**: On Kubernetes, the Consul Server configuration set in the Helm config's
-[`server.extraConfig`](/consul/docs/k8s/helm#v-server-extraconfig) key must be specified
-as JSON. The `{{` characters in the URL must be escaped using `{{ "{{" }}` so that Helm
-doesn't try to template them.
-
-
-
-### Metrics Proxy
-
-In many cases the metrics backend may be inaccessible to UI users' browsers or
-may be on a different domain and so subject to CORS restrictions. To make it
-simpler to serve the metrics to the UI in these cases, the Consul agent can
-proxy requests for metrics from the UI to the backend.
-
-**This is intended to simplify setup in test and demo environments. Careful
-consideration should be given towards using this in production.**
-
-The simplest configuration is described in [Configuring the UI To Display
-Metrics](#configuring-the-ui-to-display-metrics).
-
-#### Metrics Proxy Security
-
-~> **Security Note**: Exposing a backend metrics service to potentially
-un-authenticated network traffic via the proxy should be _carefully_ considered
-in production.
-
-The metrics proxy endpoint is internal and intended only for UI use. However by
-enabling it anyone with network access to the agent's API port may use it to
-access metrics from the backend.
-
-**If ACLs are not enabled, full access to metrics will be exposed to
-un-authenticated workloads on the network**.
-
-With ACLs enabled, the proxy endpoint requires a valid token with read access
-to all nodes and services (across all namespaces in Enterprise):
-
-
-
-
-
-
-```hcl
-service_prefix "" {
- policy = "read"
-}
-node_prefix "" {
- policy = "read"
-}
-```
-
-```json
-{
- "service_prefix": {
- "": {
- "policy": "read"
- }
- },
- "node_prefix": {
- "": {
- "policy": "read"
- }
- }
-}
-```
-
-
-
-
-
-
-
-```hcl
-namespace_prefix "" {
- service_prefix "" {
- policy = "read"
- }
- node_prefix "" {
- policy = "read"
- }
-}
-```
-
-```json
-{
- "namespace_prefix": {
- "": {
- "service_prefix": {
- "": {
- "policy": "read"
- }
- },
- "node_prefix": {
- "": {
- "policy": "read"
- }
- }
- }
- }
-}
-```
-
-
-
-
-
-
-It's typical for most authenticated users to have this level of access in Consul
-as it's required for viewing the catalog or discovering services. If you use a
-[Single Sign-On integration](/consul/docs/security/acl/auth-methods/oidc) (Consul
-Enterprise) users of the UI can be automatically issued an ACL token with the
-privileges above to be allowed access to the metrics through the proxy.
-
-Even with ACLs enabled, the proxy endpoint doesn't deeply understand the query
-language of the backend so there is no way it can enforce least-privilege access
-to only specific service-related metrics.
-
-_If you are not comfortable with all users of Consul having full access to the
-metrics backend, you should not use the proxy and find an alternative like using
-a custom provider that can query the metrics backend directly_.
-
-##### Path Allowlist
-
-To limit exposure of the metrics backend, paths must be explicitly added to an
-allowlist to avoid exposing unintended parts of the API. For example with
-Prometheus, both the `/api/v1/query_range` and `/api/v1/query` endpoints are
-needed to load time-series and individual stats. If the proxy had the `base_url`
-set to `http://prometheus-server` then the proxy would also expose read access
-to several other endpoints such as `/api/v1/status/config` which includes all
-Prometheus configuration which might include sensitive information.
-
-If you use the built-in `prometheus` provider the proxy is limited to the
-essential endpoints. The default value for `metrics_proxy.path_allowlist` is
-`["/api/v1/query_range", "/api/v1/query"]` as required by the built-in
-`prometheus` provider.
-
-If you use a custom provider that uses the metrics proxy, you'll need to
-explicitly set the allowlist based on the endpoints the provider needs to
-access.
-
-#### Adding Headers
-
-It is also possible to configure the proxy to add one or more headers to
-requests as they pass through. This is useful when the metrics backend requires
-authentication. For example if your metrics are shipped to a hosted provider,
-you could provision an API token specifically for the Consul UI and configure
-the proxy to add it as in the example below. This keeps the API token only
-visible to Consul operators in the configuration file while UI users can query
-the metrics they need without separately obtaining a token for that provider or
-having a token exposed to them that they might be able to use elsewhere.
-
-
-
-
-
-```hcl
-ui_config {
- enabled = true
- metrics_provider = "example-apm"
- metrics_proxy {
- base_url = "https://example-apm.com/api/v1/metrics"
- add_headers = [
- {
- name = "Authorization"
- value = "Bearer "
- }
- ]
- }
-}
-```
-
-
-
-
-
-```json
-{
- "ui_config": {
- "enabled": true,
- "metrics_provider": "example-apm",
- "metrics_proxy": {
- "base_url": "https://example-apm.com/api/v1/metrics",
- "add_headers": [
- {
- "name": "Authorization",
- "value": "Bearer \u003ctoken\u003e"
- }
- ]
- }
- }
-}
-```
-
-
-
-
-
-## Custom Metrics Providers
-
-Consul 1.9.0 includes a built-in provider for fetching metrics from
-[Prometheus](https://prometheus.io). To enable the UI visualization feature
-to work with other existing metrics stores and hosted services, we created a
-"metrics provider" interface in JavaScript. A custom provider may be written and
-the JavaScript file served by the Consul agent.
-
-~> **Note**: this interface is _experimental_ and may change in breaking ways or
-be removed entirely as we discover the needs of the community. Please provide
-feedback on [GitHub](https://github.com/hashicorp/consul) or
-[Discuss](https://discuss.hashicorp.com/) on how you'd like to use this.
-
-The template for a complete provider JavaScript file is given below.
-
-
-
-```javascript
-(function () {
- var provider = {
- /**
- * init is called when the provider is first loaded.
- *
- * options.providerOptions contains any operator configured parameters
- * specified in the `metrics_provider_options_json` field of the Consul
- * agent configuration file.
- *
- * Consul will provide:
- *
- * 1. A boolean field options.metrics_proxy_enabled to indicate whether the
- * agent has a metrics proxy configured.
- *
- * 2. A function options.fetch which is a thin wrapper around the browser's
- * [Fetch API](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)
- * that prefixes any url with the url of Consul's internal metrics proxy
- * endpoint and adds your current Consul ACL token to the request
- * headers. Otherwise it functions like the browser's native fetch.
- *
- * The provider should throw an Exception if the options are not valid, for
- * example because it requires a metrics proxy and one is not configured.
- */
- init: function(options) {},
-
- /**
- * serviceRecentSummarySeries should return time series for a recent time
- * period summarizing the usage of the named service in the indicated
- * datacenter. In Consul Enterprise a non-empty namespace is also provided.
- *
- * If these metrics aren't available then an empty series array may be
- * returned.
- *
- * The period may (later) be specified in options.startTime and
- * options.endTime.
- *
- * The service's protocol must be given as one of Consul's supported
- * protocols e.g. "tcp", "http", "http2", "grpc". If it is empty or the
- * provider doesn't recognize the protocol, it should treat it as "tcp" and
- * provide basic connection stats.
- *
- * The expected return value is a JavaScript promise which resolves to an
- * object that should look like the following:
- *
- * {
- * // The unitSuffix is shown after the value in tooltips. Values will be
- * // rounded and shortened. Larger values will already have a suffix
- * // like "10k". The suffix provided here is concatenated directly
- * // allowing for suffixes like "mbps/kbps" by using a suffix of "bps".
- * // If the unit doesn't make sense in this format, include a
- * // leading space for example " rps" would show as "1.2k rps".
- * unitSuffix: " rps",
- *
- * // The set of labels to graph. The key should exactly correspond to a
- * // property of every data point in the array below except for the
- * // special case "Total" which is used to show the sum of all the
- * // stacked graph values. The key is displayed in the tooltip so it
- * // should be human-friendly but as concise as possible. The value is a
- * // longer description that is displayed in the graph's key on request
- * // to explain exactly what the metrics mean.
- * labels: {
- * "Total": "Total inbound requests per second.",
- * "Successes": "Successful responses (with an HTTP response code ...",
- * "Errors": "Error responses (with an HTTP response code in the ...",
- * },
- *
- * data: [
- * {
- * time: 1600944516286, // milliseconds since Unix epoch
- * "Successes": 1234.5,
- * "Errors": 2.3,
- * },
- * ...
- * ]
- * }
- *
- * Every data point object should have a value for every series label
- * (except for "Total") otherwise it will be assumed to be "0".
- */
- serviceRecentSummarySeries: function(serviceDC, namespace, serviceName, protocol, options) {},
-
- /**
- * serviceRecentSummaryStats should return four summary statistics for a
- * recent time period for the named service in the indicated datacenter. In
- * Consul Enterprise a non-empty namespace is also provided.
- *
- * If these metrics aren't available then an empty array may be returned.
- *
- * The period may (later) be specified in options.startTime and
- * options.endTime.
- *
- * The service's protocol must be given as one of Consul's supported
- * protocols e.g. "tcp", "http", "http2", "grpc". If it is empty or the
- * provider doesn't recognize it, it should treat it as "tcp" and provide
- * just basic connection stats.
- *
- * The expected return value is a JavaScript promise which resolves to an
- * object that should look like the following:
- *
- * {
- * // stats is an array of stats to show. The first four of these will be
- * // displayed. Fewer may be returned if not available.
- * stats: [
- * {
- * // label should be 3 chars or fewer as an abbreviation
- * label: "SR",
- *
- * // desc describes the stat in a tooltip
- * desc: "Success Rate - the percentage of all requests that were not 5xx status",
- *
- * // value is a string allowing the provider to format it and add
- * // units as appropriate. It should be as compact as possible.
- * value: "98%",
- * }
- * ]
- * }
- */
- serviceRecentSummaryStats: function(serviceDC, namespace, serviceName, protocol, options) {},
-
- /**
- * upstreamRecentSummaryStats should return four summary statistics for each
- * upstream service over a recent time period, relative to the named service
- * in the indicated datacenter. In Consul Enterprise a non-empty namespace
- * is also provided.
- *
- * Note that the upstreams themselves might be in different datacenters but
- * we only pass the target service DC since typically these metrics should
- * be from the outbound listener of the target service in this DC even if
- * the requests eventually end up in another DC.
- *
- * If these metrics aren't available then an empty array may be returned.
- *
- * The period may (later) be specified in options.startTime and
- * options.endTime.
- *
- * The expected return value is a JavaScript promise which resolves to an
- * object that should look like the following:
- *
- * {
- * stats: {
- * // Each upstream will appear as an entry keyed by the upstream
- * // service name. The value is an array of stats with the same
- * // format as serviceRecentSummaryStats response.stats. Note that
- * // different upstreams might show different stats depending on
- * // their protocol.
- * "upstream_name": [
- * {label: "SR", desc: "...", value: "99%"},
- * ...
- * ],
- * ...
- * }
- * }
- */
- upstreamRecentSummaryStats: function(serviceDC, namespace, serviceName, upstreamName, options) {},
-
- /**
- * downstreamRecentSummaryStats should return four summary statistics for
- * each downstream service over a recent time period, relative to the named
- * service in the indicated datacenter. In Consul Enterprise a non-empty
- * namespace is also provided.
- *
- * Note that the service may have downstreams in different datacenters. For
- * some metrics systems which are per-datacenter this makes it hard to query
- * for all downstream metrics from one source. For now the UI will only show
- * downstreams in the same datacenter as the target service. In the future
- * this method may be called multiple times, once for each DC that contains
- * downstream services to gather metrics from each. In that case a separate
- * option for target datacenter will be used since the target service's DC
- * is still needed to correctly identify the outbound clusters that will
- * route to it from the remote DC.
- *
- * If these metrics aren't available then an empty array may be returned.
- *
- * The period may (later) be specified in options.startTime and
- * options.endTime.
- *
- * The expected return value is a JavaScript promise which resolves to an
- * object that should look like the following:
- *
- * {
- * stats: {
- * // Each downstream will appear as an entry keyed by the downstream
- * // service name. The value is an array of stats with the same
- * // format as serviceRecentSummaryStats response.stats. Different
- * // downstreams may display different stats if required although the
- * // protocol should be the same for all as it is the target
- * // service's protocol that matters here.
- * "downstream_name": [
- * {label: "SR", desc: "...", value: "99%"},
- * ...
- * ],
- * ...
- * }
- * }
- */
- downstreamRecentSummaryStats: function(serviceDC, namespace, serviceName, options) {}
- }
-
- // Register the provider with Consul for use. This example would be usable by
- // configuring the agent with `ui_config.metrics_provider = "example-provider"`.
- window.consul.registerMetricsProvider("example-provider", provider)
-
-}());
-```
-
-
-
-Additionally, the built in [Prometheus
-provider code](https://github.com/hashicorp/consul/blob/main/ui/packages/consul-ui/vendor/metrics-providers/prometheus.js)
-can be used as a reference.
-
-### Configuring the Agent With a Custom Metrics Provider.
-
-In the example below, we configure the Consul agent to use a metrics provider
-named `example-provider`, which is defined in
-`/usr/local/bin/example-metrics-provider.js`. The name `example-provider` must
-have been specified in the call to `consul.registerMetricsProvider` as in the
-code listing in the last section.
-
-
-
-
-
-```hcl
-ui_config {
- enabled = true
- metrics_provider = "example-provider"
- metrics_provider_files = ["/usr/local/bin/example-metrics-provider.js"]
- metrics_provider_options_json = <<-EOT
- {
- "foo": "bar"
- }
- EOT
-}
-```
-
-
-
-
-
-```json
-{
- "ui_config": {
- "enabled": true,
- "metrics_provider": "example-provider",
-    "metrics_provider_files": ["/usr/local/bin/example-metrics-provider.js"],
- "metrics_provider_options_json": "{\"foo\":\"bar\"}"
- }
-}
-```
-
-
-
-
-More than one JavaScript file may be specified in
-[`metrics_provider_files`](/consul/docs/agent/config/config-files#ui_config_metrics_provider_files)
-and all will be served allowing flexibility if needed to include dependencies.
-Only one metrics provider can be configured and used at one time.
-
-The
-[`metrics_provider_options_json`](/consul/docs/agent/config/config-files#ui_config_metrics_provider_options_json)
-field is an optional literal JSON object which is passed to the provider's
-`init` method at startup time. This allows configuring arbitrary parameters for
-the provider in config rather than hard coding them into the provider itself to
-make providers more reusable.
-
-The provider may fetch metrics directly from another source although in this
-case the agent will probably need to serve the correct CORS headers to prevent
-browsers from blocking these requests. These may be configured with
-[`http_config.response_headers`](/consul/docs/agent/config/config-files#response_headers).
-
-Alternatively, the provider may choose to use the [built-in metrics
-proxy](#metrics-proxy) to avoid cross domain issues or to inject additional
-authorization headers without requiring each UI user to be separately
-authenticated to the metrics backend.
-
-A function that behaves like the browser's [Fetch
-API](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API) is provided to
-the metrics provider JavaScript during `init` as `options.fetch`. This is a thin
-wrapper that prefixes any url with the url of Consul's metrics proxy endpoint
-and adds your current Consul ACL token to the request headers. Otherwise it
-functions like the browser's native fetch and will forward your request on to the
-metrics backend. The response will be returned without any modification to be
-interpreted by the provider and converted into the format as described in the
-interface above.
-
-Provider authors should make it clear to users which paths are required so they
-can correctly configure the [path allowlist](#path-allowlist) in the metrics
-proxy to avoid exposing more than needed of the metrics backend.
-
-### Custom Provider Security Model
-
-Since the JavaScript file(s) are included in Consul's UI verbatim, the code in
-them must be treated as fully trusted by the operator. Typically they will have
-authored this or will need to carefully vet providers written by third parties.
-
-This is equivalent to using the existing `-ui-dir` flag to serve an alternative
-version of the UI - in either model the operator takes full responsibility for
-the provenance of the code being served since it has the power to intercept ACL
-tokens, access cookies and local storage for the Consul UI domain and possibly
-more.
-
-## Current Limitations
-
-Currently there are some limitations to this feature.
-
-- **No cross-datacenter support** The initial metrics provider integration is
- with Prometheus which is popular and easy to setup within one Kubernetes
- cluster. However, when using the Consul UI in a multi-datacenter deployment,
- the UI allows users to select any datacenter to view.
-
- This means that the Prometheus server that the Consul agent serving the UI can
- access likely only has metrics for the local datacenter and a full solution
- would need additional proxying or exposing remote Prometheus servers on the
- network in remote datacenters. Later we may support an easy way to set this up
- via Consul service mesh but initially we don't attempt to fetch metrics in the UI
- if you are browsing a remote datacenter.
-
-- **Built-in provider requires metrics proxy** Initially the built-in
-  `prometheus` provider only supports querying Prometheus via the [metrics
-  proxy](#metrics-proxy). Later it may be possible to configure it for direct
-  access to an exposed Prometheus.
diff --git a/website/content/docs/connect/proxies/built-in.mdx b/website/content/docs/connect/proxies/built-in.mdx
deleted file mode 100644
index 93ec90bcb4e3..000000000000
--- a/website/content/docs/connect/proxies/built-in.mdx
+++ /dev/null
@@ -1,90 +0,0 @@
----
-layout: docs
-page_title: Built-in Proxy Configuration | Service Mesh
-description: >-
- Consul includes a built-in L4 proxy with limited capabilities to use for development and testing only. Use the built-in proxy config key reference to learn about the options you can configure.
----
-
-# Built-in Proxy Configuration for Service Mesh
-
-~> **Note:** The built-in proxy is not supported for production deployments. It does not
-support many of Consul's service mesh features, and is not under active development.
-The [Envoy proxy](/consul/docs/connect/proxies/envoy) should be used for production deployments.
-
-Consul comes with a built-in L4 proxy for testing and development with Consul
-service mesh.
-
-## Proxy Config Key Reference
-
-Below is a complete example of all the configuration options available
-for the built-in proxy.
-
-```json
-{
- "service": {
- "name": "example-service",
- "connect": {
- "sidecar_service": {
- "proxy": {
- "config": {
- "bind_address": "0.0.0.0",
- "bind_port": 20000,
- "local_service_address": "127.0.0.1:1234",
- "local_connect_timeout_ms": 1000,
- "handshake_timeout_ms": 10000,
- "upstreams": []
- },
- "upstreams": [
- {
- "destination_name": "example-upstream",
- "config": {
- "connect_timeout_ms": 1000
- }
- }
- ]
- }
- }
- }
- }
-}
-```
-
-All fields are optional with a reasonable default.
-
-- `bind_address` - The address the proxy will bind its
- _public_ mTLS listener to. It defaults to the same address the agent binds to.
-
-- `bind_port` - The port the proxy will bind its _public_
- mTLS listener to. If not provided, the agent will assign a random port from its
- configured proxy port range specified by [`sidecar_min_port`](/consul/docs/agent/config/config-files#sidecar_min_port)
- and [`sidecar_max_port`](/consul/docs/agent/config/config-files#sidecar_max_port).
-
-- `local_service_address` - The `[address]:port`
- that the proxy should use to connect to the local application instance. By default
- it assumes `127.0.0.1` as the address and takes the port from the service definition's
- `port` field. Note that allowing the application to listen on any non-loopback
- address may expose it externally and bypass the service mesh's access enforcement. It may
- be useful though to allow non-standard loopback addresses or where an alternative
- known-private IP is available for example when using internal networking between
- containers.
-
-- `local_connect_timeout_ms` - The number
- of milliseconds the proxy will wait to establish a connection to the _local application_
- before giving up. Defaults to `1000` or 1 second.
-
-- `handshake_timeout_ms` - The number of milliseconds
- the proxy will wait for _incoming_ mTLS connections to complete the TLS handshake.
- Defaults to `10000` or 10 seconds.
-
-- `upstreams` - **Deprecated** Upstreams are now specified
- in the `connect.proxy` definition. Upstreams specified in the opaque config map
- here will continue to work for compatibility but it's strongly recommended that
- you move to using the higher level [upstream configuration](/consul/docs/connect/proxies/proxy-config-reference#upstream-configuration-reference).
-
-## Proxy Upstream Config Key Reference
-
-All fields are optional with a reasonable default.
-
-- `connect_timeout_ms` - The number of milliseconds
- the proxy will wait to establish a TLS connection to the discovered upstream instance
- before giving up. Defaults to `10000` or 10 seconds.
diff --git a/website/content/docs/connect/proxies/deploy-service-mesh-proxies.mdx b/website/content/docs/connect/proxies/deploy-service-mesh-proxies.mdx
deleted file mode 100644
index 0bb3f7df0389..000000000000
--- a/website/content/docs/connect/proxies/deploy-service-mesh-proxies.mdx
+++ /dev/null
@@ -1,79 +0,0 @@
----
-layout: docs
-page_title: Deploy service mesh proxies
-description: >-
- Envoy and other proxies in Consul service mesh enable service-to-service communication across your network. Learn how to deploy service mesh proxies in this topic.
----
-
-# Deploy service mesh proxies
-
-This topic describes how to create, register, and start service mesh proxies in Consul. Refer to [Service mesh proxies overview](/consul/docs/connect/proxies) for additional information about how proxies enable Consul functionalities.
-
-For information about deploying proxies as sidecars for service instances, refer to [Deploy sidecar proxy services](/consul/docs/connect/proxies/deploy-sidecar-services).
-
-## Overview
-
-Complete the following steps to deploy a service mesh proxy:
-
-1. It is not required, but you can create a proxy defaults configuration entry that contains global passthrough settings for all Envoy proxies.
-1. Create a service definition file and specify the proxy configurations in the `proxy` block.
-1. Register the service using the API or CLI.
-1. Start the proxy service. Proxies appear in the list of services registered to Consul, but they must be started before they begin to route traffic in your service mesh.
-
-## Requirements
-
-If ACLs are enabled and you want to configure global Envoy settings using the [proxy defaults configuration entry](/consul/docs/connect/config-entries/proxy-defaults), you must present a token with `operator:write` permissions. Refer to [Create a service token](/consul/docs/security/acl/tokens/create/create-a-service-token) for additional information.
-
-## Configure global Envoy passthrough settings
-
-If you want to define global passthrough settings for all Envoy proxies, create a proxy defaults configuration entry and specify default settings, such as access log configuration. Note that [service defaults configuration entries](/consul/docs/connect/config-entries/service-defaults) override proxy defaults and individual service configurations override both configuration entries.
-
-1. Create a proxy defaults configuration entry and specify the following parameters:
- - `Kind`: Must be set to `proxy-defaults`
- - `Name`: Must be set to `global`
-1. Configure any additional settings you want to apply to all proxies. Refer to [Proxy defaults configuration entry reference](/consul/docs/connect/config-entries/proxy-defaults) for details about all settings available in the configuration entry.
-1. Apply the configuration by either calling the [`/config` HTTP API endpoint](/consul/api-docs/config) or running the [`consul config write` CLI command](/consul/commands/config/write). The following example writes a proxy defaults configuration entry from a local HCL file using the CLI:
-
-```shell-session
-$ consul config write proxy-defaults.hcl
-```
-
-## Define service mesh proxy
-
-Create a service definition file and configure the following fields to define a service mesh proxy:
-
-1. Set the `kind` field to `connect-proxy`. Refer to the [services configuration reference](/consul/docs/services/configuration/services-configuration-reference#kind) for information about other kinds of proxies you can declare.
-1. Specify a name for the proxy service in the `name` field. Consul applies the configurations to any proxies you bootstrap with the same name.
-1. In the `proxy.destination_service_name` field, specify the name of the service that the proxy represents.
-1. Configure any additional proxy behaviors that you want to implement in the `proxy` block. Refer to the [Service mesh proxy configuration reference](/consul/docs/connect/proxies/proxy-config-reference) for information about all parameters.
-1. Specify a port number where other services registered with Consul can discover and connect to the proxy service in the `port` field. To ensure that services only allow external connections established through the service mesh protocol, you should configure all services to only accept connections on a loopback address.
-
-Refer to the [Service mesh proxy configuration reference](/consul/docs/connect/proxies/proxy-config-reference) for example configurations.
-
-## Register the service
-
-Provide the service definition to the Consul agent to register your proxy service. You can use the same methods for registering proxy services as you do for registering application services:
-
-- Place the service definition in a Consul agent's configuration directory and start, restart, or reload the agent. Use this method when implementing changes to an existing proxy service.
-- Use the `consul services register` command to register the proxy service with a running Consul agent.
-- Call the `/agent/service/register` HTTP API endpoint to register the proxy service with a running Consul agent.
-
-Refer to [Register services and health checks](/consul/docs/services/usage/register-services-checks) for instructions.
-
-In the following example, the `consul services register` command registers a proxy service stored in `proxy.hcl`:
-
-```shell-session
-$ consul services register proxy.hcl
-```
-
-## Start the proxy
-
-Envoy requires a bootstrap configuration file before it can start. Use the [`consul connect envoy` command](/consul/commands/connect/envoy) to create the Envoy bootstrap configuration and start the proxy service. Specify the ID of the proxy you want to start with the `-proxy-id` option.
-
-The following example command starts an Envoy proxy for the `web-proxy` service:
-
-```shell-session
-$ consul connect envoy -proxy-id=web-proxy
-```
-
-For details about operating an Envoy proxy in Consul, refer to the [Envoy proxy reference](/consul/docs/connect/proxies/envoy).
diff --git a/website/content/docs/connect/proxies/deploy-sidecar-services.mdx b/website/content/docs/connect/proxies/deploy-sidecar-services.mdx
deleted file mode 100644
index c42a5b2c7f5f..000000000000
--- a/website/content/docs/connect/proxies/deploy-sidecar-services.mdx
+++ /dev/null
@@ -1,284 +0,0 @@
----
-layout: docs
-page_title: Deploy proxies as sidecar services
-description: >-
- You can register a service instance and its sidecar proxy at the same time. Learn about default settings, customizable parameters, limitations, and lifecycle behaviors of the sidecar proxy.
----
-
-# Deploy sidecar services
-
-This topic describes how to create, register, and start sidecar proxy services in Consul. Refer to [Service mesh proxies overview](/consul/docs/connect/proxies) for additional information about how proxies enable Consul's functions and operations. For information about deploying service mesh proxies, refer to [Deploy service mesh proxies](/consul/docs/connect/proxies/deploy-service-mesh-proxies).
-
-## Overview
-
-Sidecar proxies run on the same node as the single service instance that they handle traffic for.
-They may be on the same VM or running as a separate container in the same network namespace.
-
-You can attach a sidecar proxy to a service you want to deploy to your mesh:
-
-1. It is not required, but you can create a proxy defaults configuration entry that contains global passthrough settings for all Envoy proxies.
-1. Create the service definition and include the `connect` block. The `connect` block contains the sidecar proxy configurations that allow the service to interact with other services in the mesh.
-1. Register the service using either the API or CLI.
-1. Start the sidecar proxy service.
-
-## Requirements
-
-If ACLs are enabled and you want to configure global Envoy settings in the [proxy defaults configuration entry](/consul/docs/connect/config-entries/proxy-defaults), you must present a token with `operator:write` permissions. Refer to [Create a service token](/consul/docs/security/acl/tokens/create/create-a-service-token) for additional information.
-
-## Configure global Envoy passthrough settings
-
-If you want to define global passthrough settings for all Envoy proxies, create a proxy defaults configuration entry and specify default settings, such as access log configuration. [Service defaults configuration entries](/consul/docs/connect/config-entries/service-defaults) override proxy defaults and individual service configurations override both configuration entries.
-
-1. Create a proxy defaults configuration entry and specify the following parameters:
- - `Kind`: Must be set to `proxy-defaults`
- - `Name`: Must be set to `global`
-1. Configure any additional settings you want to apply to all proxies. Refer to [Proxy defaults configuration entry reference](/consul/docs/connect/config-entries/proxy-defaults) for details about all settings available in the configuration entry.
-1. Apply the configuration by either calling the [`/config` API endpoint](/consul/api-docs/config) or running the [`consul config write` CLI command](/consul/commands/config/write). The following example writes a proxy defaults configuration entry from a local HCL file using the CLI:
-
-```shell-session
-$ consul config write proxy-defaults.hcl
-```
-
-## Define service mesh proxy
-
-Create a service definition and configure the following fields:
-
-1. `name`: Specify a name for the service you want to attach a sidecar proxy to in the `name` field. This field is required for all services you want to register in Consul.
-1. `port`: Specify a port number where other services registered with Consul can discover and connect to the service in the `port` field. This field is required for all services you want to register in Consul.
-1. `connect`: Set the `connect` field to `{ sidecar_service: {} }`. The `{ sidecar_service: {} }` value is a macro that applies a set of default configurations that enable you to quickly implement a sidecar. Refer to [Sidecar service defaults](#sidecar-service-defaults) for additional information.
-1. Configure any additional options for your service. Refer to [Services configuration reference](/consul/docs/services/configuration/services-configuration-reference) for details.
-
-In the following example, a service named `web` is configured with a sidecar proxy:
-
-
-
-
-
-```hcl
-service = {
- name = "web"
- port = 8080
- connect = { sidecar_service = {} }
-}
-```
-
-
-
-
-
-```json
-
-{
- "service": {
- "name": "web",
- "port": 8080,
- "connect": { "sidecar_service": {} }
- }
-}
-
-```
-
-
-
-
-
-When Consul processes the service definition, it generates the following configuration in place of the `sidecar_service` macro. Note that sidecar proxy services are based on the `connect-proxy` type:
-
-
-
-
-
-```hcl
-services = [
- {
- name = "web"
- port = 8080
- }
- checks = {
- Interval = "10s"
- Name = "Connect Sidecar Listening"
- TCP = "127.0.0.1:20000"
- }
- checks = {
- alias_service = "web"
- name = "Connect Sidecar Aliasing web"
- }
- kind = "connect-proxy"
- name = "web-sidecar-proxy"
- port = 20000
- proxy = {
- destination_service_id = "web"
- destination_service_name = "web"
- local_service_address = "127.0.0.1"
- local_service_port = 8080
- }
-]
-
-```
-
-
-
-
-
-```json
-{
- "services": [
- {
- "name": "web",
- "port": 8080
- },
- {
- "name": "web-sidecar-proxy",
- "port": 20000,
- "kind": "connect-proxy",
- "checks": [
- {
- "Name": "Connect Sidecar Listening",
- "TCP": "127.0.0.1:20000",
- "Interval": "10s"
- },
- {
- "name": "Connect Sidecar Aliasing web",
- "alias_service": "web"
- }
- ],
- "proxy": {
- "destination_service_name": "web",
- "destination_service_id": "web",
- "local_service_address": "127.0.0.1",
- "local_service_port": 8080
- }
- }
- ]
-}
-
-```
-
-
-
-
-
-## Register the service
-
-Provide the service definition to the Consul agent to register your proxy service. You can use the same methods for registering proxy services as you do for registering application services:
-
-- Place the service definition in a Consul agent's configuration directory and start, restart, or reload the agent. Use this method when implementing changes to an existing proxy service.
-- Use the `consul services register` command to register the proxy service with a running Consul agent.
-- Call the `/agent/service/register` HTTP API endpoint to register the proxy service with a running Consul agent.
-
-Refer to [Register services and health checks](/consul/docs/services/usage/register-services-checks) for instructions.
-
-In the following example, the `consul services register` command registers a proxy service stored in `proxy.hcl`:
-
-```shell-session
-$ consul services register proxy.hcl
-```
-
-## Start the proxy
-
-Envoy requires a bootstrap configuration file before it can start. Use the [`consul connect envoy` command](/consul/commands/connect/envoy) to create the Envoy bootstrap configuration and start the proxy service. Specify the name of the service with the attached proxy with the `-sidecar-for` option.
-
-The following example command starts an Envoy sidecar proxy for the `web` service:
-
-```shell-session
-$ consul connect envoy -sidecar-for=web
-```
-
-For details about operating an Envoy proxy in Consul, refer to the [Envoy proxy reference](/consul/docs/connect/proxies/envoy).
-
-## Configuration reference
-
-The `sidecar_service` block is a service definition that can contain most regular service definition fields. Refer to [Limitations](#limitations) for information about unsupported service definition fields for sidecar proxies.
-
-Consul treats sidecar proxy service definitions as a root-level service definition. All fields are optional in nested definitions, which default to opinionated settings that are intended to reduce the burden of setting up a sidecar proxy.
-
-## Sidecar service defaults
-
-The following fields are set by default on a sidecar service registration. With
-[the exceptions noted](#limitations) any field may be overridden explicitly in
-the `connect.sidecar_service` definition to customize the proxy registration.
-The "parent" service refers to the service definition that embeds the sidecar
-proxy.
-
-- `id` - ID defaults to `<parent-service-id>-sidecar-proxy`. This value cannot
-  be overridden as it is used to [manage the lifecycle](#lifecycle) of the
-  registration.
-- `name` - Defaults to `<parent-service-name>-sidecar-proxy`.
-- `tags` - Defaults to the tags of the parent service.
-- `meta` - Defaults to the service metadata of the parent service.
-- `port` - Defaults to being auto-assigned from a configurable
- range specified by [`sidecar_min_port`](/consul/docs/agent/config/config-files#sidecar_min_port)
- and [`sidecar_max_port`](/consul/docs/agent/config/config-files#sidecar_max_port).
-- `kind` - Defaults to `connect-proxy`. This value cannot be overridden.
-- `check`, `checks` - By default we add a TCP check on the local address and
- port for the proxy, and a [service alias
- check](/consul/docs/services/usage/checks#alias-checks) for the parent service. If either
- `check` or `checks` fields are set, only the provided checks are registered.
-- `proxy.destination_service_name` - Defaults to the parent service name.
-- `proxy.destination_service_id` - Defaults to the parent service ID.
-- `proxy.local_service_address` - Defaults to `127.0.0.1`.
-- `proxy.local_service_port` - Defaults to the parent service port.
-
-### Example with overwritten configurations
-
-In the following example, the `sidecar_service` macro sets baseline configurations for the proxy, but the [proxy
-upstreams](/consul/docs/connect/proxies/proxy-config-reference#upstream-configuration-reference)
-and [built-in proxy
-configuration](/consul/docs/connect/proxies/built-in) fields contain custom values:
-
-```json
-{
- "name": "web",
- "port": 8080,
- "connect": {
- "sidecar_service": {
- "proxy": {
- "upstreams": [
- {
- "destination_name": "db",
- "local_bind_port": 9191
- }
- ],
- "config": {
- "handshake_timeout_ms": 1000
- }
- }
- }
- }
-}
-```
-
-## Limitations
-
-The following fields are not supported in the `connect.sidecar_service` block:
-
-- `id` - Sidecar services get an ID assigned and it is an error to override
- this value. This ID is required to ensure that the agent can correctly deregister the sidecar service
- later when the parent service is removed.
-- `kind` - Kind defaults to `connect-proxy` and there is no way to
- unset this behavior.
-- `connect.sidecar_service` - Service definitions cannot be nested recursively.
-- `connect.native` - The `kind` is fixed to `connect-proxy` and it is
- an error to register a `connect-proxy` that is also service mesh-native.
-
-## Lifecycle
-
-Sidecar service registration is mostly a configuration syntax helper to avoid
-adding lots of boilerplate for basic sidecar options, however the agent does
-have some specific behavior around their lifecycle that makes them easier to
-work with.
-
-The agent fixes the ID of the sidecar service to be based on the parent
-service's ID, which enables the following behavior.
-
-- A service instance can only ever have one sidecar service registered.
-- When re-registering through the HTTP API or reloading from configuration file:
- - If something changes in the nested sidecar service definition, the update is applied to the current sidecar registration instead of creating a new
- one.
- - If a service registration removes the nested `sidecar_service` then the
- previously registered sidecar for that service is deregistered
- automatically.
-- When reloading the configuration files, if a service definition changes its
- ID, then a new service instance and a new sidecar instance are
- registered. The old instance and proxy are removed because they are no longer found in
- the configuration files.
diff --git a/website/content/docs/connect/proxies/envoy-extensions/configuration/ext-authz.mdx b/website/content/docs/connect/proxies/envoy-extensions/configuration/ext-authz.mdx
deleted file mode 100644
index ebe7f99a96e3..000000000000
--- a/website/content/docs/connect/proxies/envoy-extensions/configuration/ext-authz.mdx
+++ /dev/null
@@ -1,739 +0,0 @@
----
-layout: docs
-page_title: External authorization extension configuration reference
-description: Learn how to configure the ext-authz Envoy extension, which is a builtin Consul extension that configures Envoy proxies to request authorization from an external service.
----
-
-# External authorization extension configuration reference
-
-This topic describes how to configure the external authorization Envoy extension, which configures Envoy proxies to request authorization from an external service. Refer to [Delegate authorization to an external service](/consul/docs/connect/proxies/envoy-extensions/usage/ext-authz) for usage information.
-
-## Configuration model
-
-The following list outlines the field hierarchy, data types, and requirements for the external authorization configuration. Place the configuration inside the `EnvoyExtension.Arguments` field in the proxy defaults or service defaults configuration entry. Refer to the following documentation for additional information:
-
-- [`EnvoyExtensions` in proxy defaults](/consul/docs/connect/config-entries/proxy-defaults#envoyextensions)
-- [`EnvoyExtensions` in service defaults](/consul/docs/connect/config-entries/service-defaults#envoyextensions)
- - [Envoy External Authorization documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/http/ext_authz/v3/ext_authz.proto)
-
-Click on a property name to view additional details, including default values.
-
-- [`Name`](#name): string | required | must be set to `builtin/ext-authz`
-- [`Arguments`](#arguments): map | required
- - [`ProxyType`](#arguments-proxytype): string | required | `connect-proxy`
- - [`ListenerType`](#arguments-listenertype): string | required | `inbound`
- - [`InsertOptions`](#arguments-insertoptions): map
- - [`Location`](#arguments-insertoptions-location): string
-    - [`FilterName`](#arguments-insertoptions-filtername): string
- - [`Config`](#arguments-config): map | required
- - [`BootstrapMetadataLabelsKey`](#arguments-config-bootstrapmetadatalabelskey): string
-    - [`ClearRouteCache`](#arguments-config-clearroutecache): boolean | `false` | HTTP only
- - [`GrpcService`](#arguments-config-grpcservice): map
- - [`Target`](#arguments-config-grpcservice-target): map | required
- - [`Service`](#arguments-config-grpcservice-target-service): map
- - [`Name`](#arguments-config-grpcservice-target-service): string
- - [`Namespace`](#arguments-config-grpcservice-target-service): string |
- - [`Partition`](#arguments-config-grpcservice-target-service): string |
- - [`URI`](#arguments-config-grpcservice-target-uri): string
-        - [`Timeout`](#arguments-config-grpcservice-target-timeout): string | `1s`
- - [`Authority`](#arguments-config-grpcservice-authority): string
- - [`InitialMetadata`](#arguments-config-grpcservice-initialmetadata): list
- - [`Key`](#arguments-config-grpcservice-initialmetadata): string
- - [`Value`](#arguments-config-grpcservice-initialmetadata): string
- - [`HttpService`](#arguments-config-httpservice): map
- - [`Target`](#arguments-config-httpservice-target): map | required
- - [`Service`](#arguments-config-httpservice): string
- - [`Name`](#arguments-config-httpservice-target-service): string
- - [`Namespace`](#arguments-config-httpservice-target-service): string |
- - [`Partition`](#arguments-config-httpservice-target-service): string |
- - [`URI`](#arguments-config-httpservice): string
- - [`Timeout`](#arguments-config-httpservice): string | `1s`
- - [`PathPrefix`](#arguments-config-httpservice-pathprefix): string
- - [`AuthorizationRequest`](#arguments-config-httpservice-authorizationrequest): map
- - [`AllowedHeaders`](#arguments-config-httpservice-authorizationrequest-allowedheaders): list
- - [`Contains`](#arguments-config-httpservice-authorizationrequest-allowedheaders): string
- - [`Exact`](#arguments-config-httpservice-authorizationrequest-allowedheaders): string
- - [`IgnoreCase`](#arguments-config-httpservice-authorizationrequest-allowedheaders): boolean
- - [`Prefix`](#arguments-config-httpservice-authorizationrequest-allowedheaders): string
- - [`SafeRegex`](#arguments-config-httpservice-authorizationrequest-allowedheaders): string
- - [`HeadersToAdd`](#arguments-config-httpservice-authorizationrequest-headerstoadd): list
- - [`Key`](#arguments-config-httpservice-authorizationrequest-headerstoadd): string
- - [`Value`](#arguments-config-httpservice-authorizationrequest-headerstoadd): string
- - [`AuthorizationResponse`](#arguments-config-httpservice-authorizationresponse): map
- - [`AllowedUpstreamHeaders`](#arguments-config-httpservice-authorizationresponse-allowedupstreamheaders): list
- - [`Contains`](#arguments-config-httpservice-authorizationresponse-allowedupstreamheaders): string
- - [`Exact`](#arguments-config-httpservice-authorizationresponse-allowedheaders): string
- - [`IgnoreCase`](#arguments-config-httpservice-authorizationresponse-allowedheaders): boolean
- - [`Prefix`](#arguments-config-httpservice-authorizationresponse-allowedheaders): string
- - [`SafeRegex`](#arguments-config-httpservice-authorizationresponse-allowedheaders): string
- - [`Suffix`](#arguments-config-httpservice-authorizationresponse-allowedheaders): string
- - [`AllowedUpstreamHeadersToAppend`](#arguments-config-httpservice-authorizationresponse-allowedupstreamheaderstoappend): list
- - [`Contains`](#arguments-config-httpservice-authorizationresponse-allowedupstreamheaderstoappend): string
- - [`Exact`](#arguments-config-httpservice-authorizationresponse-allowedupstreamheaderstoappend): string
- - [`IgnoreCase`](#arguments-config-httpservice-authorizationresponse-allowedupstreamheaderstoappend): boolean
- - [`Prefix`](#arguments-config-httpservice-authorizationresponse-allowedupstreamheaderstoappend): string
- - [`SafeRegex`](#arguments-config-httpservice-authorizationresponse-allowedupstreamheaderstoappend): string
- - [`Suffix`](#arguments-config-httpservice-authorizationresponse-allowedupstreamheaderstoappend): string
- - [`AllowedClientHeaders`](#arguments-config-httpservice-authorizationresponse-allowedclientheaders): list
- - [`Contains`](#arguments-config-httpservice-authorizationresponse-allowedclientheaders): string
- - [`Exact`](#arguments-config-httpservice-authorizationresponse-allowedclientheaders): string
- - [`IgnoreCase`](#arguments-config-httpservice-authorizationresponse-allowedclientheaders): boolean
- - [`Prefix`](#arguments-config-httpservice-authorizationresponse-allowedclientheaders): string
- - [`SafeRegex`](#arguments-config-httpservice-authorizationresponse-allowedclientheaders): string
- - [`Suffix`](#arguments-config-httpservice-authorizationresponse-allowedclientheaders): string
- - [`AllowedClientHeadersOnSuccess`](#arguments-config-httpservice-authorizationresponse-allowedclientheadersonsuccess): list
- - [`Contains`](#arguments-config-httpservice-authorizationresponse-allowedclientheadersonsuccess): string
- - [`Exact`](#arguments-config-httpservice-authorizationresponse-allowedclientheadersonsuccess): string
- - [`IgnoreCase`](#arguments-config-httpservice-authorizationresponse-allowedclientheadersonsuccess): boolean
- - [`Prefix`](#arguments-config-httpservice-authorizationresponse-allowedclientheadersonsuccess): string
- - [`SafeRegex`](#arguments-config-httpservice-authorizationresponse-allowedclientheadersonsuccess): string
- - [`Suffix`](#arguments-config-httpservice-authorizationresponse-allowedclientheadersonsuccess): string
- - [`DynamicMetadataFromHeaders`](#arguments-config-httpservice-authorizationresponse-dynamicmetadatafromheaders): list
- - [`Contains`](#arguments-config-httpservice-authorizationresponse-dynamicmetadatafromheaders): string
- - [`Exact`](#arguments-config-httpservice-authorizationresponse-dynamicmetadatafromheaders): string
- - [`IgnoreCase`](#arguments-config-httpservice-authorizationresponse-dynamicmetadatafromheaders): boolean
- - [`Prefix`](#arguments-config-httpservice-authorizationresponse-dynamicmetadatafromheaders): string
- - [`SafeRegex`](#arguments-config-httpservice-authorizationresponse-dynamicmetadatafromheaders): string
- - [`Suffix`](#arguments-config-httpservice-authorizationresponse-dynamicmetadatafromheaders): string
- - [`IncludePeerCertificate`](#arguments-config-includepeercertificate): boolean | `false`
- - [`MetadataContextNamespaces`](#arguments-config-metadatacontextnamespaces): list of strings | HTTP only
- - [`StatusOnError`](#arguments-config-statusonerror): number | `403` | HTTP only
- - [`StatPrefix`](#arguments-config-statprefix): string | `response`
- - [`WithRequestBody`](#arguments-config-withrequestbody): map | HTTP only
- - [`MaxRequestBytes`](#arguments-config-withrequestbody-maxrequestbytes): number
- - [`AllowPartialMessage`](#arguments-config-withrequestbody-allowpartialmessage): boolean | `false`
- - [`PackAsBytes`](#arguments-config-withrequestbody-packasbytes): boolean | `false`
-
-## Complete configuration
-
-When each field is defined, an `ext-authz` configuration has the following form:
-
-```hcl
-Name = "builtin/ext-authz"
-Arguments = {
- ProxyType = "connect-proxy"
- InsertOptions = {
- Location = ""
- FilterName = ""
- }
- Config = {
- BootstrapMetadataLabelsKey = ""
- ClearRouteCache = false // HTTP only
- GrpcService = {
- Target = {
- Service = {
- Name = ""
- Namespace = ""
- Partition = ""
- URI = ""
- Timeout = "1s"
- Authority = ""
- InitialMetadata = [
- "" : ""
- HttpService = {
- Target = {
- Service = {
- Name = ""
- Namespace = ""
- Partition = ""
- URI = ""
- Timeout = "1s"
- }
- }
- PathPrefix = "//"
- AuthorizationRequest = {
- AllowedHeaders = [
- Contains = "",
- Exact = "",
- IgnoreCase = false,
- Prefix = "",
- SafeRegex = ""
- ]
- HeadersToAdd = [
- "" = ""
- ]
- }
- AuthorizationResponse = {
- AllowedUpstreamHeaders = [
- Contains = "",
- Exact = "",
- IgnoreCase = false,
- Prefix = "",
- SafeRegex = ""
- Suffix = ""
- ]
- AllowedUpstreamHeadersToAppend = [
- Contains = "",
- Exact = "",
- IgnoreCase = false,
- Prefix = "",
- SafeRegex = ""
- Suffix = ""
- ]
- AllowedClientHeaders = [
- Contains = "",
- Exact = "",
- IgnoreCase = false,
- Prefix = "",
- SafeRegex = ""
- Suffix = ""
- ]
- AllowedClientHeadersOnSuccess = [
- Contains = "",
- Exact = "",
- IgnoreCase = false,
- Prefix = "",
- SafeRegex = ""
-        Suffix = ""
-      ]
-      DynamicMetadataFromHeaders = [
- Contains = "",
- Exact = "",
- IgnoreCase = false,
- Prefix = "",
- SafeRegex = ""
- Suffix = ""
- ]
- IncludePeerCertificate = false
- MetadataContextNamespaces = [
- ""
- ]
- StatusOnError = 403 // HTTP only
- StatPrefix = "response"
- WithRequestBody = { //HTTP only
- MaxRequestBytes =
- AllowPartialMessage = false
- PackAsBytes = false
-```
-
-## Specification
-
-This section provides details about the fields you can configure for the external authorization extension.
-### `Name`
-
-Specifies the name of the extension. Must be set to `builtin/ext-authz`.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: String value set to `builtin/ext-authz`.
-
-### `Arguments`
-
-Contains the global configuration for the extension.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: Map
-
-### `Arguments.ProxyType`
-
-Specifies the type of Envoy proxy that this extension applies to. The extension only applies to proxies that match this type and is ignored for all other proxy types. The only supported value is `connect-proxy`.
-
-#### Values
-
-- Default: `connect-proxy`
-- This field is required.
-- Data type: String
-
-### `Arguments.ListenerType`
-
-Specifies the type of listener the extension applies to. The listener type is either `inbound` or `outbound`. If the listener type is set to `inbound`, Consul applies the extension so the external authorization is enabled when other services in the mesh send messages to the service attached to the proxy. If the listener type is set to `outbound`, Consul applies the extension so the external authorization is enabled when the attached proxy sends messages to other services in the mesh.
-
-#### Values
-
-- Default: `inbound`
-- This field is required.
-- Data type is one of the following string values:
- - `inbound`
- - `outbound`
-
-### `Arguments.InsertOptions`
-
-Specifies options for defining the insertion point for the external authorization filter in the Envoy filter chain. By default, the external authorization filter is inserted as the first filter in the filter chain per the default setting for the [`Location`](#arguments-insertoptions-location) field.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-### `Arguments.InsertOptions.Location`
-
-Specifies the insertion point for the external authorization filter in the Envoy filter chain. You can specify one of the following string values:
-
-- `First`: Inserts the filter as the first filter in the filter chain, regardless of the filter specified in the `FilterName` field.
-- `BeforeLast`: Inserts the filter before the last filter in the chain, regardless of the filter specified in the `FilterName` field. This allows the filter to be inserted after all other filters and immediately before the terminal filter.
-- `AfterFirstMatch`: Inserts the filter after the first filter in the chain that has a name matching the value of the `FilterName` field.
-- `AfterLastMatch`: Inserts the filter after the last filter in the chain that has a name matching the value of the `FilterName` field.
-- `BeforeFirstMatch`: Inserts the filter before the first filter in the chain that has a name matching the value of the `FilterName` field.
-- `BeforeLastMatch`: Inserts the filter before the last filter in the chain that has a name matching the value of the `FilterName` field.
-
-#### Values
-
-- Default: `BeforeFirstMatch`
-- Data type: String
-
-### `Arguments.InsertOptions.FilterName`
-
-Specifies the name of an existing filter in the chain to match when inserting the external authorization filter. Specifying a filter name enables you to configure an insertion point relative to the position of another filter in the chain.
-
-#### Values
-
-- Default: `envoy.filters.network.tcp_proxy` for TCP services. `envoy.filters.http.router` for HTTP services.
-- Data type: String
-
-### `Arguments.Config`
-
-Contains the configuration settings for the extension.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: Map
-
-### `Arguments.Config.BootstrapMetadataLabelsKey`
-
-Specifies a key from the Envoy bootstrap metadata. Envoy adds labels associated with the key to the authorization request context.
-
-#### Values
-
-- Default: None
-- Data type: String
-
-### `Arguments.Config.ClearRouteCache`
-
-Directs Envoy to clear the route cache so that the external authorization service correctly affects routing decisions. If set to `true`, the filter clears all cached routes.
-
-Envoy also clears cached routes if the status returned from the authorization service is `200` for HTTP responses or `0` for gRPC responses. Envoy also clears cached routes if at least one authorization response header is added to the client request or is used for altering another client request header.
-
-#### Values
-
-- Default: `false`
-- Data type: Boolean
-
-
-### `Arguments.Config.GrpcService`
-
-Specifies the external authorization configuration for gRPC requests. Configure the `GrpcService` or the [`HttpService`](#arguments-config-httpservice) settings, but not both.
-
-#### Values
-
-- Default: None
-- Either the `GrpcService` or the `HttpService` configuration is required.
-- Data type: Map
-
-### `Arguments.Config.GrpcService.Target`
-
-Configuration for specifying the service to send gRPC authorization requests to. The `Target` field may contain the following fields:
-
-- [`Service`](#arguments-config-grpcservice-target-service) or [`Uri`](#arguments-config-grpcservice-target-uri)
-- [`Timeout`](#arguments-config-grpcservice-target-timeout)
-
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: Map
-
-### `Arguments{}.Config{}.GrpcService{}.Target{}.Service{}`
-
-Specifies the upstream external authorization service. Configure this field when authorization requests are sent to an upstream service within the service mesh. The service must be configured as an upstream of the service that the filter is applied to.
-
-Configure either the `Service` field or the [`Uri`](#arguments-config-grpcservice-target-uri) field, but not both.
-
-#### Values
-
-- Default: None
-- This field or [`Uri`](#arguments-config-grpcservice-target-uri) is required.
-- Data type: Map
-
-The following table describes how to configure parameters for the `Service` field:
-
-| Parameter | Description | Data type | Default |
-| --- | --- | --- | --- |
-| `Name` | Specifies the name of the upstream service. | String | None |
-| `Namespace` | Specifies the Consul namespace that the upstream service belongs to. | String | `default` |
-| `Partition` | Specifies the Consul admin partition that the upstream service belongs to. | String | `default` |
-
-### `Arguments.Config.GrpcService.Target.Uri`
-
-Specifies the URI of the external authorization service. Configure this field when you must provide an explicit URI to the external authorization service, such as cases in which the authorization service is running on the same host or pod. If set, the value of this field must be one of `localhost:`, `127.0.0.1:`, or `::1:`.
-
-Configure either the `Uri` field or the [`Service`](#arguments-config-grpcservice-target-service) field, but not both.
-
-#### Values
-
-- Default: None
-- This field or [`Service`](#arguments-config-grpcservice-target-service) is required.
-- Data type: String
-
-### `Arguments.Config.GrpcService.Target.Timeout`
-
-Specifies the maximum duration that a response can take to arrive upon request.
-
-#### Values
-
-- Default: `1s`
-- Data type: String
-
-### `Arguments.Config.GrpcService.Authority`
-
-Specifies the authority header to send in the gRPC request. If this field is not set, the authority field is set to the cluster name. This field does not override the SNI that Envoy sends to the external authorization service.
-
-#### Values
-
-- Default: Cluster name
-- Data type: String
-
-### `Arguments.Config.GrpcService.InitialMetadata[]`
-
-Specifies additional metadata to include in streams initiated to the `GrpcService`. You can specify metadata for injecting additional ad-hoc authorization headers, for example, `x-foo-bar: baz-key`. For more information, including details on header value syntax, refer to the [Envoy documentation on custom request headers](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#config-http-conn-man-headers-custom-request-headers).
-
-#### Values
-
-- Default: None
-- Data type: List of one or more key-value pairs:
-
- - KEY: String
- - VALUE: String
-
-### `Arguments{}.Config{}.HttpService{}`
-
-Contains the configuration for raw HTTP communication between the filter and the external authorization service. Configure the `HttpService` or the [`GrpcService`](#arguments-config-grpcservice) settings, but not both.
-
-#### Values
-
-- Default: None
-- Either the `HttpService` or the `GrpcService` configuration is required.
-- Data type: Map
-
-### `Arguments{}.Config{}.HttpService{}.Target{}`
-
-Configuration for specifying the service to send HTTP authorization requests to. The `Target` field may contain the following fields:
-
-- [`Service`](#arguments-config-httpservice-target-service) or [`Uri`](#arguments-config-httpservice-target-uri)
-- [`Timeout`](#arguments-config-httpservice-target-timeout)
-
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: Map
-
-### `Arguments{}.Config{}.HttpService{}.Target{}.Service{}`
-
-Specifies the upstream external authorization service. Configure this field when HTTP authorization requests are sent to an upstream service within the service mesh. The service must be configured as an upstream of the service that the filter is applied to.
-
-Configure either the `Service` field or the [`Uri`](#arguments-config-httpservice-target-uri) field, but not both.
-
-#### Values
-
-- Default: None
-- This field or [`Uri`](#arguments-config-httpservice-target-uri) is required.
-- Data type: Map
-
-The following table describes how to configure parameters for the `Service` field:
-
-| Parameter | Description | Data type | Default |
-| --- | --- | --- | --- |
-| `Name` | Specifies the name of the upstream service. | String | None |
-| `Namespace` | Specifies the Consul namespace that the upstream service belongs to. | String | `default` |
-| `Partition` | Specifies the Consul admin partition that the upstream service belongs to. | String | `default` |
-
-### `Arguments{}.Config{}.HttpService{}.Target{}.Uri`
-
-Specifies the URI of the external authorization service. Configure this field when you must provide an explicit URI to the external authorization service, such as cases in which the authorization service is running on the same host or pod. If set, the value of this field must be one of `localhost:`, `127.0.0.1:`, or `::1:`.
-
-Configure either the `Uri` field or the [`Service`](#arguments-config-httpservice-target-service) field, but not both.
-
-#### Values
-
-- Default: None
-- This field or [`Service`](#arguments-config-httpservice-target-service) is required.
-- Data type: String
-
-### `Arguments{}.Config{}.HttpService{}.Target{}.Timeout`
-
-Specifies the maximum duration that a response can take to arrive upon request.
-
-#### Values
-
-- Default: `1s`
-- Data type: String
-
-### `Arguments{}.Config{}.HttpService{}.PathPrefix`
-
-Specifies a prefix for the value of the authorization request header `Path`. You must include the preceding forward slash (`/`).
-
-#### Values
-
-- Default: None
-- Data type: String
-
-### `Arguments{}.Config{}.HttpService{}.AuthorizationRequest{}`
-
-HTTP-only configuration that controls the HTTP authorization request metadata. The `AuthorizationRequest` field may contain the following parameters:
-
-- [`AllowHeaders`](#arguments-config-httpservice-authorizationrequest-allowheaders)
-- [`HeadersToAdd`](#arguments-config-httpservice-authorizationrequest-headerstoadd)
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-### `Arguments{}.Config{}.HttpService{}.AuthorizationRequest{}.AllowHeaders[]`
-
-Specifies a set of rules for matching client request headers. The request to the external authorization service includes any client request headers that satisfy any of the rules. Refer to the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/http/ext_authz/v3/ext_authz.proto#extensions-filters-http-ext-authz-v3-extauthz) for a detailed explanation.
-
-#### Values
-
-- Default: None
-- Data type: List of key-value pairs
-
-The following table describes the matching rules you can configure in the `AllowHeaders` field:
-
-@include 'envoy_ext_rule_matcher.mdx'
-
-### `Arguments{}.Config{}.HttpService{}.AuthorizationRequest{}.HeadersToAdd[]`
-
-Specifies a list of headers to include in the request to the authorization service. Note that Envoy overwrites client request headers with the same key.
-
-#### Values
-
-- Default: None
-- Data type: List of one or more key-value pairs:
-
- - KEY: String
- - VALUE: String
-
-### `Arguments{}.Config{}.HttpService{}.AuthorizationResponse{}`
-
-HTTP-only configuration that controls HTTP authorization response metadata. The `AuthorizationResponse` field may contain the following parameters:
-
-- [`AllowedUpstreamHeaders`](#arguments-config-httpservice-authorizationresponse-allowedupstreamheaders)
-- [`AllowedUpstreamHeadersToAppend`](#arguments-config-httpservice-authorizationresponse-allowedupstreamheaderstoappend)
-- [`AllowedClientHeaders`](#arguments-config-httpservice-authorizationresponse-allowedclientheaders)
-- [`AllowedClientHeadersOnSuccess`](#arguments-config-httpservice-authorizationresponse-allowedclientheadersonsuccess)
-- [`DynamicMetadataFromHeaders`](#arguments-config-httpservice-authorizationresponse-dynamicmetadatafromheaders)
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-### `Arguments{}.Config{}.HttpService{}.AuthorizationResponse{}.AllowedUpstreamHeaders[]`
-
-Specifies a set of rules for matching authorization response headers. Envoy adds any headers from the external authorization service that satisfy the rules to the original client request before forwarding it to the upstream service. Envoy overwrites existing headers.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-The following table describes the matching rules you can configure in the `AllowedUpstreamHeaders` field:
-
-@include 'envoy_ext_rule_matcher.mdx'
-
-### `Arguments{}.Config{}.HttpService{}.AuthorizationResponse{}.AllowedUpstreamHeadersToAppend[]`
-
-Specifies a set of rules for matching authorization response headers. Envoy appends any headers from the external authorization service that satisfy the rules to the original client request before forwarding it to the upstream service. Envoy appends values to existing headers instead of overwriting them.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-The following table describes the matching rules you can configure in the `AllowedUpstreamHeadersToAppend` field:
-
-@include 'envoy_ext_rule_matcher.mdx'
-
-### `Arguments{}.Config{}.HttpService{}.AuthorizationResponse{}.AllowedClientHeaders[]`
-
-Specifies a set of rules for matching client response headers. Envoy adds any headers from the external authorization service to the client response that satisfy the rules. When the list is not set, Envoy includes all authorization response headers except `Authority (Host)`. When a header is included in this list, Envoy automatically adds the following headers:
-
-- `Path`
-- `Status`
-- `Content-Length`
-- `WWWAuthenticate`
-- `Location`
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-The following table describes the matching rules you can configure in the `AllowedClientHeaders` field:
-
-@include 'envoy_ext_rule_matcher.mdx'
-
-### `Arguments{}.Config{}.HttpService{}.AuthorizationResponse{}.AllowedClientHeadersOnSuccess[]`
-
-Specifies a set of rules for matching client response headers. Envoy adds headers from the external authorization service to the client response when the headers satisfy the rules and the authorization is successful. If the headers match the rules but the authorization fails or is denied, the headers are not added. If this field is not set, Envoy does not add any additional headers to the client's response on success.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-The following table describes the matching rules you can configure in the `AllowedClientHeadersOnSuccess` field:
-
-@include 'envoy_ext_rule_matcher.mdx'
-
-### `Arguments{}.Config{}.HttpService{}.AuthorizationResponse{}.DynamicMetadataFromHeaders[]`
-
-Specifies a set of rules for matching authorization response headers. Envoy emits headers from the external authorization service as dynamic metadata that the next filter in the chain can consume.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-The following table describes the matching rules you can configure in the `DynamicMetadataFromHeaders` field:
-
-@include 'envoy_ext_rule_matcher.mdx'
-
-### `Arguments{}.Config{}.IncludePeerCertificate`
-
-If set to `true`, Envoy includes the peer X.509 certificate in the authorization request if the certificate is available.
-
-#### Values
-
-- Default: `false`
-- Data type: Boolean
-
-### `Arguments{}.Config{}.MetadataContextNamespace[]`
-
-HTTP only field that specifies a list of metadata namespaces. The values of the namespaces are included in the authorization request context. The `consul` namespace is always included in addition to the namespaces you configure.
-
-#### Values
-
-- Default: `["consul"]`
-- Data type: List of string values
-
-### `Arguments{}.Config{}.StatusOnError`
-
-HTTP only field that specifies a return code status to respond with on error. Refer to the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/type/v3/http_status.proto#enum-type-v3-statuscode) for additional information.
-
-#### Values
-
-- Default: `403`
-- Data type: Integer
-
-### `Arguments{}.Config{}.StatPrefix`
-
-Specifies a prefix to add when writing statistics.
-
-#### Values
-
-- Default: `response`
-- Data type: String
-
-### `Arguments{}.Config{}.WithRequestBody{}`
-
-HTTP only field that configures Envoy to buffer the client request body and send it with the authorization request. If unset, the request body is not sent with the authorization request.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-The following table describes the parameters that you can include in the `WithRequestBody` field:
-
-| Parameter | Description | Data type | Default |
-| --- | --- | --- | --- |
-| `MaxRequestBytes` | Specifies the maximum size of the message body that the filter holds in memory. Envoy returns HTTP `403` and does not initiate the authorization process when the buffer reaches the number set in this field unless `AllowPartialMessage` is set to `true`. | uint32 | None |
-| `AllowPartialMessage` | If set to `true`, Envoy buffers the request body until the value of `MaxRequestBytes` is reached. The authorization request is dispatched with a partial body and the filter does not return a `413` HTTP error. | Boolean | `false` |
-| `PackAsBytes` | If set to `true`, Envoy sends the request body to the external authorization as raw bytes. Otherwise, Envoy sends the request body as a UTF-8 encoded string. | Boolean | `false` |
-
-## Examples
-
-The following examples demonstrate common configuration patterns for specific use cases.
-
-### Authorize gRPC requests to a URI
-
-In the following example, a service defaults configuration entry contains an `ext-authz` configuration. The configuration allows the `api` service to make gRPC authorization requests to a service at `localhost:9191`:
-
-```hcl
-Kind = "service-defaults"
-Name = "api"
-EnvoyExtensions = [
- {
- Name = "builtin/ext-authz"
- Arguments = {
- ProxyType = "connect-proxy"
- Config = {
- GrpcService = {
- Target = {
- URI = "127.0.0.1:9191"
- }
- }
- }
- }
- }
-]
-```
-
-### Upstream authorization
-
-In the following example, a service defaults configuration entry contains an `ext-authz` configuration. The configuration allows the `api` service to make gRPC authorization requests to a service named `authz`:
-
-```hcl
-Kind = "service-defaults"
-Name = "api"
-EnvoyExtensions = [
- {
- Name = "builtin/ext-authz"
- Arguments = {
- ProxyType = "connect-proxy"
- Config = {
- GrpcService = {
- Target = {
- Service = {
- Name = "authz"
- }
- }
- }
- }
- }
- }
-]
-```
-
-### Authorization requests after service intentions for Consul Enterprise
-
-In the following example for Consul Enterprise, the `api` service is configured to make HTTP authorization requests to a service named `authz` in the `foo` namespace and `bar` partition. Envoy also inserts the external authorization filter after the `envoy.filters.http.rbac` filter:
-
-```hcl
-Kind = "service-defaults"
-Name = "api"
-Protocol = "http"
-EnvoyExtensions = [
- {
- Name = "builtin/ext-authz"
- Arguments = {
- ProxyType = "connect-proxy"
- InsertOptions = {
- Location = "AfterLastMatch"
- FilterName = "envoy.filters.http.rbac"
- }
- Config = {
- HttpService = {
- Target = {
- Service = {
- Name = "authz"
- Namespace = "foo"
- Partition = "bar"
- }
- }
- }
- }
- }
- }
-]
-```
diff --git a/website/content/docs/connect/proxies/envoy-extensions/configuration/otel-access-logging.mdx b/website/content/docs/connect/proxies/envoy-extensions/configuration/otel-access-logging.mdx
deleted file mode 100644
index 9cef3a563650..000000000000
--- a/website/content/docs/connect/proxies/envoy-extensions/configuration/otel-access-logging.mdx
+++ /dev/null
@@ -1,390 +0,0 @@
----
-layout: docs
-page_title: OpenTelemetry Access Logging extension configuration reference
-description: Learn how to configure the otel-access-logging Envoy extension, which is a builtin Consul extension that configures Envoy proxies to send access logs to an OpenTelemetry collector service.
----
-
-# OpenTelemetry Access Logging extension configuration reference
-
-This topic describes how to configure the OpenTelemetry access logging Envoy extension, which configures Envoy proxies to send access logs to an OpenTelemetry collector service. Refer to [Send access logs to OpenTelemetry collector service](/consul/docs/connect/proxies/envoy-extensions/usage/otel-access-logging) for usage information.
-
-## Configuration model
-
-The following list outlines the field hierarchy, data types, and requirements for the OpenTelemetry access logging configuration. Place the configuration inside the `EnvoyExtension.Arguments` field in the proxy defaults or service defaults configuration entry. Refer to the following documentation for additional information:
-
-- [`EnvoyExtensions` in proxy defaults](/consul/docs/connect/config-entries/proxy-defaults#envoyextensions)
-- [`EnvoyExtensions` in service defaults](/consul/docs/connect/config-entries/service-defaults#envoyextensions)
-- [Envoy OpenTelemetry Access Logging Configuration documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/access_loggers/open_telemetry/v3/logs_service.proto#extensions-access-loggers-open-telemetry-v3-opentelemetryaccesslogconfig)
-
-Click on a property name to view additional details, including default values.
-
-- [`Name`](#name): string | required | must be set to `builtin/otel-access-logging`
-- [`Arguments`](#arguments): map | required
- - [`ProxyType`](#arguments-proxytype): string | required | `connect-proxy`
- - [`ListenerType`](#arguments-listenertype): string | required | `inbound`
- - [`Config`](#arguments-config): map | required
- - [`LogName`](#arguments-config-logname): string
- - [`GrpcService`](#arguments-config-grpcservice): map
- - [`Target`](#arguments-config-grpcservice-target): map | required
- - [`Service`](#arguments-config-grpcservice-target-service): map
- - [`Name`](#arguments-config-grpcservice-target-service): string
- - [`Namespace`](#arguments-config-grpcservice-target-service): string
- - [`Partition`](#arguments-config-grpcservice-target-service): string
- - [`URI`](#arguments-config-grpcservice-target-uri): string
- - [`Timeout`](#arguments-config-grpcservice-target-timeout): string | `1s`
- - [`Authority`](#arguments-config-grpcservice-authority): string
- - [`InitialMetadata`](#arguments-config-grpcservice-initialmetadata): list
- - [`Key`](#arguments-config-grpcservice-initialmetadata): string
- - [`Value`](#arguments-config-grpcservice-initialmetadata): string
- - [`BufferFlushInterval`](#arguments-config-bufferflushinterval): string
- - [`BufferSizeBytes`](#arguments-config-buffersizebytes): number
- - [`FilterStateObjectsToLog`](#arguments-config-filterstateobjectstolog): list of strings
- - [`RetryPolicy`](#arguments-config-retrypolicy): map
- - [`RetryBackOff`](#arguments-config-retrypolicy-retrybackoff): map
- - [`BaseInterval`](#arguments-config-retrypolicy-retrybackoff): string | `1s`
- - [`MaxInterval`](#arguments-config-retrypolicy-retrybackoff): string | `30s`
- - [`NumRetries`](#arguments-config-retrypolicy-numretries): number
- - [`Body`](#arguments-config-body): string, number, boolean or list of bytes
- - [`Attributes`](#arguments-config-attributes): map of string to string, number, boolean or list of bytes
- - [`ResourceAttributes`](#arguments-config-resourceattributes): map of string to string, number, boolean or list of bytes
-
-## Complete configuration
-
-When each field is defined, an `otel-access-logging` configuration has the following form:
-
-```hcl
-Name = "builtin/otel-access-logging"
-Arguments = {
- ProxyType = "connect-proxy"
- ListenerType = ""
- Config = {
- LogName = ""
- GrpcService = {
- Target = {
- Service = {
- Name = ""
- Namespace = ""
- Partition = ""
- }
- URI = ""
- Timeout = "1s"
- }
- Authority = ""
- InitialMetadata = [
- "" : ""
- ]
- }
- BufferFlushInterval = "1s"
- BufferSizeBytes = 16384
- FilterStateObjectsToLog = [
- "Additional filter state objects to log in filter_state_objects"
- ]
- RetryPolicy = {
- RetryBackOff = {
- BaseInterval = "1s"
- MaxInterval = "30s"
- }
- NumRetries =
- }
- Body = "Log Request Body"
- Attributes = {
- "" : ""
- }
- ResourceAttributes = {
- "" : ""
- }
- }
-}
-```
-
-## Specification
-
-This section provides details about the fields you can configure for the OpenTelemetry Access Logging extension.
-
-### `Name`
-
-Specifies the name of the extension. Must be set to `builtin/otel-access-logging`.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: String value set to `builtin/otel-access-logging`.
-
-### `Arguments`
-
-Contains the global configuration for the extension.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: Map
-
-### `Arguments.ProxyType`
-
-Specifies the type of Envoy proxy that this extension applies to. The extension only applies to proxies that match this type and is ignored for all other proxy types. The only supported value is `connect-proxy`.
-
-#### Values
-
-- Default: `connect-proxy`
-- This field is required.
-- Data type: String
-
-### `Arguments.ListenerType`
-
-Specifies the type of listener the extension applies to. The listener type is either `inbound` or `outbound`. If the listener type is set to `inbound`, Consul applies the extension so the access logging is enabled when other services in the mesh send messages to the service attached to the proxy. If the listener type is set to `outbound`, Consul applies the extension so the access logging is enabled when the attached proxy sends messages to other services in the mesh.
-
-#### Values
-
-- Default: `inbound`
-- This field is required.
-- Data type is one of the following string values:
- - `inbound`
- - `outbound`
-
-### `Arguments.Config`
-
-Contains the configuration settings for the extension.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: Map
-
-### `Arguments.Config.LogName`
-
-Specifies the user-readable name of the access log to be returned in `StreamAccessLogsMessage.Identifier`. This allows the access log server to differentiate between different access logs coming from the same Envoy. If you leave it empty, it inherits the value from `ListenerType`.
-
-#### Values
-
-- Default: None
-- Data type: String
-
-### `Arguments.Config.GrpcService`
-
-Specifies the OpenTelemetry Access Logging configuration for gRPC requests.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: Map
-
-### `Arguments.Config.GrpcService.Target`
-
-Configuration for specifying the service to send gRPC access logging requests to. The `Target` field may contain the following fields:
-
-- [`Service`](#arguments-config-grpcservice-target-service) or [`Uri`](#arguments-config-grpcservice-target-uri)
-- [`Timeout`](#arguments-config-grpcservice-target-timeout)
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: Map
-
-### `Arguments.Config.GrpcService.Target.Service`
-
-Specifies the upstream OpenTelemetry collector service. Configure this field when access logging requests are sent to an upstream service within the service mesh. The service must be configured as an upstream of the service that the filter is applied to.
-
-Configure either the `Service` field or the [`Uri`](#arguments-config-grpcservice-target-uri) field, but not both.
-
-#### Values
-
-- Default: None
-- This field or [`Uri`](#arguments-config-grpcservice-target-uri) is required.
-- Data type: Map
-
-The following table describes how to configure parameters for the `Service` field:
-
-| Parameter | Description | Data type | Default |
-| ----------- | ---------------------------------------------------------------------------------------------------- | --------- | --------- |
-| `Name` | Specifies the name of the upstream service. | String | None |
-| `Namespace` | Specifies the Consul namespace that the upstream service belongs to. | String | `default` |
-| `Partition` | Specifies the Consul admin partition that the upstream service belongs to. | String | `default` |
-
-### `Arguments.Config.GrpcService.Target.Uri`
-
-Specifies the URI of the OpenTelemetry collector service. Configure this field when you must provide an explicit URI to the OpenTelemetry collector service, such as cases in which the access logging service is running on the same host or pod. If set, the value of this field must be one of `localhost:`, `127.0.0.1:`, or `::1:`.
-
-Configure either the `Uri` field or the [`Service`](#arguments-config-grpcservice-target-service) field, but not both.
-
-#### Values
-
-- Default: None
-- This field or [`Service`](#arguments-config-grpcservice-target-service) is required.
-- Data type: String
-
-### `Arguments.Config.GrpcService.Target.Timeout`
-
-Specifies the maximum duration that a response can take to arrive upon request.
-
-#### Values
-
-- Default: `1s`
-- Data type: String
-
-### `Arguments.Config.GrpcService.Authority`
-
-Specifies the authority header to send in the gRPC request. If this field is not set, the authority field is set to the cluster name. This field does not override the SNI that Envoy sends to the OpenTelemetry collector service.
-
-#### Values
-
-- Default: Cluster name
-- Data type: String
-
-### `Arguments.Config.GrpcService.InitialMetadata`
-
-Specifies additional metadata to include in streams initiated to the `GrpcService`. You can specify metadata for injecting additional ad-hoc authorization headers, for example, `x-foo-bar: baz-key`. For more information, including details on header value syntax, refer to the [Envoy documentation on custom request headers](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#config-http-conn-man-headers-custom-request-headers).
-
-#### Values
-
-- Default: None
-- Data type: List of one or more key-value pairs:
-
- - KEY: String
- - VALUE: String
-
-### `Arguments.Config.BufferFlushInterval`
-
-Specifies an interval for flushing access logs to the gRPC stream. The logger flushes requests at the end of every interval or when the log reaches the batch size limit, whichever comes first.
-
-#### Values
-
-- Default: `1s`
-- Data type: String
-
-### `Arguments.Config.BufferSizeBytes`
-
-Specifies the soft size limit in bytes for the access log entries buffer. The logger buffers requests until it reaches this limit or every time the flush interval elapses, whichever comes first. Set this field to `0` to disable batching.
-
-#### Values
-
-- Default: `16384`
-- Data type: Integer
-
-### `Arguments.Config.FilterStateObjectsToLog`
-
-Specifies additional filter state objects to log in `filter_state_objects`. The logger calls `FilterState::Object::serializeAsProto` to serialize the filter state object.
-
-#### Values
-
-- Default: None
-- Data type: List of String
-
-### `Arguments.Config.RetryPolicy`
-
-Defines a policy for retrying access log requests to the upstream service when a request fails. The `RetryPolicy` field is a map containing the following parameters:
-
-- [`RetryBackOff`](#arguments-config-retrypolicy-retrybackoff)
-- [`NumRetries`](#arguments-config-retrypolicy-numretries)
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-### `Arguments.Config.RetryPolicy.RetryBackOff`
-
-Specifies parameters that control retry backoff strategy.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-The following table describes the fields you can specify in the `RetryBackOff` map:
-
-| Parameter | Description | Data type | Default |
-| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | ------- |
-| `BaseInterval` | Specifies the base interval for determining the next backoff computation. Set a value greater than `0` and less than or equal to the `MaxInterval` value. | String | `1s` |
-| `MaxInterval`  | Specifies the maximum interval between retries. Set the value greater than or equal to the `BaseInterval` value.                                           | String    | `30s`   |
-
-### `Arguments.Config.RetryPolicy.NumRetries`
-
-Specifies the number of times Envoy retries the request to the upstream service if the initial attempt is unsuccessful.
-
-#### Values
-
-- Default: `1`
-- Data type: Integer
-
-### `Arguments.Config.Body`
-
-Specifies OpenTelemetry [LogResource](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/logs/v1/logs.proto) fields, following [Envoy access logging formatting](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage). Refer to `body` in the LogResource proto for more details.
-
-#### Values
-
-- Default: None
-- Data type: String
-
-### `Arguments.Config.Attributes`
-
-Specifies `attributes` in the [LogResource](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/logs/v1/logs.proto). Refer to `attributes` in the LogResource proto for more details.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-### `Arguments.Config.ResourceAttributes`
-
-Specifies OpenTelemetry [Resource](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/logs/v1/logs.proto#L51) attributes. The attributes are filled with Envoy node information.
-
-#### Values
-
-- Default: None
-- Data type: Map
-
-## Examples
-
-The following examples demonstrate common configuration patterns for specific use cases.
-
-### OpenTelemetry Access Logging requests to URI
-
-In the following example, a service defaults configuration entry contains an `otel-access-logging` configuration. The configuration allows the `api` service to make gRPC OpenTelemetry Access Logging requests to a service at `localhost:9191`:
-
-```hcl
-Kind = "service-defaults"
-Name = "api"
-EnvoyExtensions = [
- {
- Name = "builtin/otel-access-logging"
- Arguments = {
- ProxyType = "connect-proxy"
- Config = {
- GrpcService = {
- Target = {
- URI = "127.0.0.1:9191"
- }
- }
- }
- }
- }
-]
-```
-
-### Upstream OpenTelemetry Access Logging
-
-In the following example, a service defaults configuration entry contains an `otel-access-logging` configuration. The configuration allows the `api` service to make gRPC OpenTelemetry Access Logging requests to a service named `otel-collector`:
-
-```hcl
-Kind = "service-defaults"
-Name = "api"
-EnvoyExtensions = [
- {
- Name = "builtin/otel-access-logging"
- Arguments = {
- ProxyType = "connect-proxy"
- Config = {
- GrpcService = {
- Target = {
- Service = {
- Name = "otel-collector"
- }
- }
- }
- }
- }
- }
-]
-```
diff --git a/website/content/docs/connect/proxies/envoy-extensions/configuration/property-override.mdx b/website/content/docs/connect/proxies/envoy-extensions/configuration/property-override.mdx
deleted file mode 100644
index 610371b303da..000000000000
--- a/website/content/docs/connect/proxies/envoy-extensions/configuration/property-override.mdx
+++ /dev/null
@@ -1,280 +0,0 @@
----
-layout: docs
-page_title: Property override configuration reference
-description: Learn how to configure the property-override plugin, which is a builtin Consul plugin that allows you to set and remove Envoy proxy properties.
----
-
-# Property override configuration reference
-
-This topic describes how to configure the `property-override` extension so that you can set and remove individual properties on the Envoy resources Consul generates. Refer to [Configure Envoy proxy properties](/consul/docs/connect/proxies/envoy-extensions/usage/property-override) for usage information.
-
-## Configuration model
-
-The following list outlines the field hierarchy, data types, and requirements for the `property-override` configuration. Place the configuration inside the `EnvoyExtension.Arguments` field in the proxy defaults or service defaults configuration entry. Refer to the following documentation for additional information:
-
-- [`EnvoyExtensions` in proxy defaults](/consul/docs/connect/config-entries/proxy-defaults#envoyextensions)
-- [`EnvoyExtensions` in service defaults](/consul/docs/connect/config-entries/service-defaults#envoyextensions)
-
-Click on a property name to view additional details, including default values.
-
-- [`ProxyType`](#proxytype): string | `connect-proxy`
-- [`Debug`](#debug): bool | `false`
-- [`Patches`](#patches): list | required
- - [`ResourceFilter`](#patches-resourcefilter): map
- - [`ResourceType`](#patches-resourcefilter-resourcetype): string | required
- - [`TrafficDirection`](#patches-resourcefilter-trafficdirection): string | required
- - [`Services`](#patches-resourcefilter-services): list
- - [`Name`](#patches-resourcefilter-services-name): string
- [`Namespace`](#patches-resourcefilter-services-namespace): string | `default`
- [`Partition`](#patches-resourcefilter-services-partition): string | `default`
- - [`Op`](#patches-op): string | required
- - [`Path`](#patches-path): string | required
- - [`Value`](#patches-value): map, number, boolean, or string
-
-## Complete configuration
-
-When each field is defined, a `property-override` configuration has the following form:
-
-
-```hcl
-ProxyType = "connect-proxy"
-Debug = false
-Patches = [
- {
- ResourceFilter = {
- ResourceType = ""
- TrafficDirection = ""
- Services = [
- {
- Name = ""
- Namespace = ""
- Partition = ""
- }
- ]
- }
- Op = ""
- Path = ""
- Value = ""
- }
-]
-```
-
-## Specification
-
-This section provides details about the fields you can configure for the `property-override` extension.
-
-### `ProxyType`
-
-Specifies the type of Envoy proxy that the extension applies to. The only supported value is `connect-proxy`.
-
-#### Values
-
-- Default: `connect-proxy`
-- Data type: String
-
-### `Debug`
-
-Enables full debug mode. When `Debug` is set to `true`, all possible fields for the given `ResourceType` and first unmatched segment of `Path` are returned on error. When set to `false`, the error message only includes the first ten possible fields.
-
-#### Values
-
-- Default: `false`
-- Data type: Boolean
-
-### `Patches[]`
-
-Specifies a list of one or more JSON Patches that map to the Envoy proxy configurations you want to modify. Refer to [IETF RFC 6902](https://datatracker.ietf.org/doc/html/rfc6902/) for information about the JSON Patch specification.
-
-#### Values
-
-- Default: None
-- The `Patches` parameter is a list of configurations in JSON Patch format. Each patch can contain the following fields:
- - [`ResourceFilter`](#patches-resourcefilter)
- - [`Op`](#patches-op)
- - [`Path`](#patches-path)
- - [`Value`](#patches-value)
-
-
-### `Patches[].ResourceFilter{}`
-
-Specifies the filter for targeting specific Envoy resources. The `ResourceFilter` configuration is not part of the JSON Patch specification.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: Map
-
-The following table describes how to configure a `ResourceFilter`:
-
-| Parameter | Description | Type |
-| --- | --- | --- |
-| `ProxyType` | Specifies the proxy type that the extension applies to. The only supported value is `connect-proxy`. | String |
-| `ResourceType` | Specifies the Envoy resource type that the extension applies to. You can specify one of the following values for each `ResourceFilter`: <ul><li>`cluster`</li><li>`cluster-load-assignment`</li><li>`route`</li><li>`listener`</li></ul> | String |
-| `TrafficDirection` | Specifies the type of traffic that the extension applies to relative to the current proxy. You can specify one of the following values for each `ResourceFilter`: <ul><li>`inbound`: Targets resources for the proxy's inbound traffic.</li><li>`outbound`: Targets resources for the proxy's upstream services.</li></ul> | String |
-| `Services` | Specifies a list of services to target. Each member of the list has the following fields: <ul><li>`Name`: Specifies the service associated with the traffic.</li><li>`Namespace`: Specifies the Consul Enterprise namespace the service is in.</li><li>`Partition`: Specifies the Consul Enterprise admin partition the service is in.</li></ul> If `TrafficDirection` is set to `outbound`, upstream services in this field correspond to local Envoy resources that Consul patches at runtime. Do not configure the `Services` field if `TrafficDirection` is set to `inbound`. If this field is not set, Envoy targets all applicable resources. When patching outbound listeners, the patch includes the outbound transparent proxy listener only if `Services` is unset and if the local service is in transparent proxy mode. | List of maps |
-
-### `Patches[].Op`
-
-Specifies the JSON Patch operation to perform when the `ResourceFilter` matches a local Envoy proxy configuration. You can specify one of the following values for each patch:
-
-- `add`: Replaces a property or message specified by [`Path`](#patches-path) with the given value. The JSON Patch `add` operation does not merge objects. To emulate merges, you must configure discrete `add` operations for each changed field. Consul returns an error if the target field does not exist in the corresponding schema.
-- `remove`: Unsets the value of the field specified by [`Path`](#patches-path). If the field is not set, no changes are made. Consul returns an error if the target field does not exist in the corresponding schema.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type is one of the following string values:
- - `add`
- - `remove`
-
-### `Patches[].Path`
-
-Specifies where the extension performs the associated operation on the specified resource type. Refer to [`ResourceType`](#patches-resourcefilter) for information about specifying a resource type to target. Refer to [`Op`](#patches-op) for information about setting an operation to perform on the resources.
-
-The `Path` field does not support addressing array elements or protobuf map field entries. Refer to [Constructing paths](/consul/docs/connect/proxies/envoy-extensions/usage/property-override#constructing-paths) for information about how to construct paths.
-
-When setting fields, the extension sets any unset intermediate fields to their default values. A single operation on a nested field can set multiple intermediate fields. Because Consul sets the intermediate fields to their default values, you may need to configure subsequent patches to satisfy Envoy or Consul validation.
-
-#### Values
-
-- Default: None
-- This field is required.
-- Data type: String
-
-### `Patches[].Value{}`
-
-Defines a value to set at the specified [path](#patches-path) if the [operation](#patches-op) is set to `add`. You can specify either a scalar or enum value, an array of scalar or enum values (for repeated fields), or define a map that contains string keys and values corresponding to scalar or enum child fields. Single and repeated scalar and enum values are supported. Refer to the [example configurations](#examples) for additional guidance and to the [Envoy API documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/api) for additional information about Envoy proxy interfaces.
-
-If Envoy specifies a wrapper as the target field type, the extension automatically coerces simple values to the wrapped type when patching. For example, the value `32768` is allowed when targeting a cluster's `per_connection_buffer_limit_bytes`, which is a `UInt32Value` field. Refer to the [protobuf documentation](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/wrappers.proto) for additional information about wrappers.
-
-#### Values
-
-- Default: None
-- This field is required if [`Op`](#patches-op) is set to `add`, otherwise you must omit the field.
-- This field takes one of the following data types:
- - scalar
- - enum
- - map
-
-## Examples
-
-The following examples demonstrate patterns that you may be able to model your configurations on.
-
-### Enable `respect_dns_ttl` in a cluster
-
-In the following example, the `add` operation patches the outbound cluster corresponding to the `other-svc` upstream service to enable `respect_dns_ttl`. The `Path` specifies the [Cluster `/respect_dns_ttl`](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/cluster.proto#envoy-v3-api-field-config-cluster-v3-cluster-respect-dns-ttl) top-level field and `Value` specifies a value of `true`:
-
-```hcl
-Kind = "service-defaults"
-Name = "my-svc"
-Protocol = "http"
-EnvoyExtensions = [
- {
- Name = "builtin/property-override",
- Arguments = {
- ProxyType = "connect-proxy",
- Patches = [
- {
- ResourceFilter = {
- ResourceType = "cluster"
- TrafficDirection = "outbound"
- Services = [{
- Name = "other-svc"
- }]
- }
- Op = "add"
- Path = "/respect_dns_ttl"
- Value = true
- }
- ]
- }
- }
-]
-```
-
-### Update multiple values in a message field
-
-In the following example, both `ResourceFilter` blocks target the cluster corresponding to the `other-svc` upstream service and modify [Cluster `/outlier_detection`](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/outlier_detection.proto) properties:
-
-```hcl
-Kind = "service-defaults"
-Name = "my-svc"
-Protocol = "http"
-EnvoyExtensions = [
- {
- Name = "builtin/property-override",
- Arguments = {
- ProxyType = "connect-proxy",
- Patches = [
- {
- ResourceFilter = {
- ResourceType = "cluster"
- TrafficDirection = "outbound"
- Services = [{
- Name = "other-svc"
- }]
- }
- Op = "add"
- Path = "/outlier_detection/max_ejection_time/seconds"
- Value = 120
- },
- {
- ResourceFilter = {
- ResourceType = "cluster"
- TrafficDirection = "outbound"
- Services = [{
- Name = "other-svc"
- }]
- }
- Op = "add"
- Path = "/outlier_detection/max_ejection_time_jitter/seconds"
- Value = 1
- }
- ]
- }
- }
-]
-```
-
-The use of `/seconds` in these examples corresponds to the same field in the [google.protobuf.Duration](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/duration.proto) proto definition, since the extension does not support JSON serialized string forms of common protobuf types (e.g. `120s`).
-
--> **Note:** Using separate patches per field preserves any existing configuration of other fields in `outlier_detection` that may be directly set by Consul, such as [`enforcing_consecutive_5xx`](https://developer.hashicorp.com/consul/docs/connect/proxies/envoy#enforcing_consecutive_5xx).
-
-### Replace a message field
-
-In the following example, a `ResourceFilter` targets the cluster corresponding to the `other-svc` upstream service and _replaces_ the entire map of properties located at `/outlier_detection`, including explicitly set `enforcing_success_rate` and `success_rate_minimum_hosts` properties:
-
-```hcl
-Kind = "service-defaults"
-Name = "my-svc"
-Protocol = "http"
-EnvoyExtensions = [
- {
- Name = "builtin/property-override"
- Arguments = {
- ProxyType = "connect-proxy"
- Patches = [
- {
- ResourceFilter = {
- ResourceType = "cluster"
- TrafficDirection = "outbound"
- Services = [{
- Name = "other-svc"
- }]
- }
- Op = "add"
- Path = "/outlier_detection"
- Value = {
- "enforcing_success_rate" = 80
- "success_rate_minimum_hosts" = 2
- }
- }
- ]
- }
- }
-]
-```
-
-Unlike the previous example, other `/outlier_detection` values set by Consul will _not_ be retained unless they match Envoy's defaults, because the entire value of `/outlier_detection` will be replaced.
diff --git a/website/content/docs/connect/proxies/envoy-extensions/configuration/wasm.mdx b/website/content/docs/connect/proxies/envoy-extensions/configuration/wasm.mdx
deleted file mode 100644
index 6884112aba31..000000000000
--- a/website/content/docs/connect/proxies/envoy-extensions/configuration/wasm.mdx
+++ /dev/null
@@ -1,484 +0,0 @@
----
-layout: docs
-page_title: WebAssembly extension configuration reference
-description: Learn how to configure the wasm Envoy extension, which is a builtin Consul extension that allows you to run WebAssembly plugins in Envoy proxies.
----
-
-# WebAssembly extension configuration reference
-
-This topic describes how to configure the `wasm` extension, which directs Consul to run WebAssembly (Wasm) plugins in Envoy proxies. Refer to [Run WebAssembly plug-ins in Envoy proxy](/consul/docs/connect/proxies/envoy-extensions/usage/wasm) for usage information.
-
-## Configuration model
-
-The following list outlines the field hierarchy, data types, and requirements for the `wasm` configuration. Place the configuration inside the `EnvoyExtension.Arguments` field in the proxy defaults or service defaults configuration entry. Refer to the following documentation for additional information:
-
-- [`EnvoyExtensions` in proxy defaults](/consul/docs/connect/config-entries/proxy-defaults#envoyextensions)
-- [`EnvoyExtensions` in service defaults](/consul/docs/connect/config-entries/service-defaults#envoyextensions)
-
-Click on a property name to view additional details, including default values.
-
-- [`Protocol`](#protocol): string
-- [`ListenerType`](#listenertype): string | required
-- [`ProxyType`](#proxytype): string | `connect-proxy`
-- [`PluginConfig`](#pluginconfig): map | required
- - [`Name`](#pluginconfig-name): string
- - [`RootID`](#pluginconfig-rootid): string | required
- - [`VmConfig`](#pluginconfig-vmconfig): map
- - [`VmID`](#pluginconfig-vmconfig-vmid): string
- - [`Runtime`](#pluginconfig-vmconfig): string | `v8`
- - [`Code`](#pluginconfig-vmconfig-code): map
- - [`Local`](#pluginconfig-vmconfig-code-local): map
- - [`Filename`](#pluginconfig-vmconfig-code-local): string
- - [`Remote`](#pluginconfig-vmconfig-code-remote): map
- - [`HttpURI`](#pluginconfig-vmconfig-code-remote-httpuri): map
- - [`Service`](#pluginconfig-vmconfig-code-remote-httpuri-service): map
- - [`Name`](#pluginconfig-vmconfig-code-remote-httpuri-service): string
- - [`Namespace`](#pluginconfig-vmconfig-code-remote-httpuri-service): string
- - [`Partition`](#pluginconfig-vmconfig-code-remote-httpuri-service): string
- - [`URI`](#pluginconfig-vmconfig-code-remote-httpuri-uri): string
- - [`Timeout`](#pluginconfig-vmconfig-code-remote-httpuri-timeout): string
- - [`SHA256`](#pluginconfig-vmconfig-code-remote-sha256): string
- - [`RetryPolicy`](#pluginconfig-vmconfig-code-remote-retrypolicy): map
- - [`RetryBackOff`](#pluginconfig-vmconfig-code-remote-retrypolicy-retrybackoff): map
- - [`BaseInterval`](#pluginconfig-vmconfig-code-remote-retrypolicy-retrybackoff): string
- - [`MaxInterval`](#pluginconfig-vmconfig-code-remote-retrypolicy-retrybackoff): string
- - [`NumRetries`](#pluginconfig-vmconfig-code-remote-retrypolicy-numretries): number | `-1`
- - [`Configuration`](#pluginconfig-vmconfig-configuration): string
- - [`EnvironmentVariables`](#pluginconfig-vmconfig-environmentvariables): map
- - [`HostEnvKeys`](#pluginconfig-vmconfig-environmentvariables-hostenvkeys): list of strings
- - [`KeyValues`](#pluginconfig-vmconfig-environmentvariables-keyvalues): map
- - [`Configuration`](#pluginconfig-configuration): string
- - [`CapabilityRestrictionConfiguration`](#pluginconfig-vmconfig-capabilityrestrictionconfiguration): map
- - [`AllowedCapabilities`](#pluginconfig-vmconfig-capabilityrestrictionconfiguration): map of strings
-
-## Complete configuration
-
-When all parameters are set for the extension, the configuration has the following form:
-
-```hcl
-Protocol = ""
-ListenerType = ""
-ProxyType = "connect-proxy"
-PluginConfig = {
- Name = ""
- RootID = "