From efbe796c34e97d5db01fe6a01229c1b94584ce5d Mon Sep 17 00:00:00 2001
From: Shaun Struwig <41984034+Blargian@users.noreply.github.com>
Date: Tue, 24 Jun 2025 14:20:52 +0200
Subject: [PATCH 01/12] new replication guide - setup section
---
docs/architecture/cluster-deployment.md | 474 ++++++++++++++++++------
1 file changed, 370 insertions(+), 104 deletions(-)
diff --git a/docs/architecture/cluster-deployment.md b/docs/architecture/cluster-deployment.md
index af00312b8ac..74f7bd455fd 100644
--- a/docs/architecture/cluster-deployment.md
+++ b/docs/architecture/cluster-deployment.md
@@ -6,148 +6,414 @@ title: 'Cluster Deployment'
description: 'By going through this tutorial, you will learn how to set up a simple ClickHouse cluster.'
---
-This tutorial assumes you've already set up a [local ClickHouse server](../getting-started/install/install.mdx)
+## Prerequisites {#prerequisites}
-By going through this tutorial, you'll learn how to set up a simple ClickHouse cluster. It'll be small, but fault-tolerant and scalable. Then we will use one of the example datasets to fill it with data and execute some demo queries.
+- You've already set up a [local ClickHouse server](../getting-started/install/install.mdx)
+- You are familiar with basic configuration concepts of ClickHouse
+- You have Docker installed on your machine
-## Cluster Deployment {#cluster-deployment}
+In this tutorial you'll learn how to set up a simple ClickHouse cluster
+consisting of two shards and two replicas, with three dedicated ClickHouse Keeper nodes
+managing coordination and maintaining quorum in the cluster.
-This ClickHouse cluster will be a homogeneous cluster. Here are the steps:
+The architecture of the cluster we will be setting up is shown below:
-1. Install ClickHouse server on all machines of the cluster
-2. Set up cluster configs in configuration files
-3. Create local tables on each instance
-4. Create a [Distributed table](../engines/table-engines/special/distributed.md)
-A [distributed table](../engines/table-engines/special/distributed.md) is a kind of "view" to the local tables in a ClickHouse cluster. A SELECT query from a distributed table executes using resources of all cluster's shards. You may specify configs for multiple clusters and create multiple distributed tables to provide views for different clusters.
-Here is an example config for a cluster with three shards, with one replica each:
+
-```xml
-<remote_servers>
-    <perftest_3shards_1replicas>
-        <shard>
-            <replica>
-                <host>example-perftest01j.clickhouse.com</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <shard>
-            <replica>
-                <host>example-perftest02j.clickhouse.com</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <shard>
-            <replica>
-                <host>example-perftest03j.clickhouse.com</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-    </perftest_3shards_1replicas>
-</remote_servers>
+## Set up directory structure and test environment {#set-up}
+
+In this tutorial, you will use [Docker Compose](https://docs.docker.com/compose/) to
+set up the ClickHouse cluster for simplicity. This setup can be adapted to run
+on separate local machines, virtual machines, or cloud instances as well.
+
+Run the following commands to set up the directory structure for the cluster:
+
+```bash
+mkdir clickhouse-cluster
+cd clickhouse-cluster
+
+# Create clickhouse-keeper directories
+for i in {01..03}; do
+ mkdir -p fs/volumes/clickhouse-keeper-${i}/etc/clickhouse-keeper
+done
+
+# Create clickhouse-server directories
+for i in {01..04}; do
+ mkdir -p fs/volumes/clickhouse-${i}/etc/clickhouse-server
+done
```
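+
+If you want to sanity-check the layout before continuing, you can list the
+directories that were just created (an optional step; it assumes the standard
+`find` utility is available on your machine):
+
+```bash
+find fs -type d | sort
+```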
-For further demonstration, let's create a new local table with the same `CREATE TABLE` query that we used for `hits_v1` in the single node deployment tutorial, but with a different table name:
+Add the following `docker-compose.yml` file to the `clickhouse-cluster` directory:
-```sql
-CREATE TABLE tutorial.hits_local (...) ENGINE = MergeTree() ...
+```yaml
+version: '3.8'
+services:
+ clickhouse-01:
+ image: "clickhouse/clickhouse-server:latest"
+ user: "101:101"
+ container_name: clickhouse-01
+ hostname: clickhouse-01
+ volumes:
+ - ${PWD}/fs/volumes/clickhouse-01/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
+ - ${PWD}/fs/volumes/clickhouse-01/etc/clickhouse-server/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
+ ports:
+ - "127.0.0.1:8123:8123"
+ - "127.0.0.1:9000:9000"
+ depends_on:
+ - clickhouse-keeper-01
+ - clickhouse-keeper-02
+ - clickhouse-keeper-03
+ clickhouse-02:
+ image: "clickhouse/clickhouse-server:latest"
+ user: "101:101"
+ container_name: clickhouse-02
+ hostname: clickhouse-02
+ volumes:
+ - ${PWD}/fs/volumes/clickhouse-02/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
+ - ${PWD}/fs/volumes/clickhouse-02/etc/clickhouse-server/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
+ ports:
+ - "127.0.0.1:8124:8123"
+ - "127.0.0.1:9001:9000"
+ depends_on:
+ - clickhouse-keeper-01
+ - clickhouse-keeper-02
+ - clickhouse-keeper-03
+ clickhouse-03:
+ image: "clickhouse/clickhouse-server:latest"
+ user: "101:101"
+ container_name: clickhouse-03
+ hostname: clickhouse-03
+ volumes:
+ - ${PWD}/fs/volumes/clickhouse-03/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
+ - ${PWD}/fs/volumes/clickhouse-03/etc/clickhouse-server/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
+ ports:
+ - "127.0.0.1:8125:8123"
+ - "127.0.0.1:9002:9000"
+ depends_on:
+ - clickhouse-keeper-01
+ - clickhouse-keeper-02
+ - clickhouse-keeper-03
+ clickhouse-04:
+ image: "clickhouse/clickhouse-server:latest"
+ user: "101:101"
+ container_name: clickhouse-04
+ hostname: clickhouse-04
+ volumes:
+ - ${PWD}/fs/volumes/clickhouse-04/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
+ - ${PWD}/fs/volumes/clickhouse-04/etc/clickhouse-server/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
+ ports:
+ - "127.0.0.1:8126:8123"
+ - "127.0.0.1:9003:9000"
+ depends_on:
+ - clickhouse-keeper-01
+ - clickhouse-keeper-02
+ - clickhouse-keeper-03
+ clickhouse-keeper-01:
+ image: "clickhouse/clickhouse-keeper:latest-alpine"
+ user: "101:101"
+ container_name: clickhouse-keeper-01
+ hostname: clickhouse-keeper-01
+ volumes:
+ - ${PWD}/fs/volumes/clickhouse-keeper-01/etc/clickhouse-keeper/keeper_config.xml:/etc/clickhouse-keeper/keeper_config.xml
+ ports:
+ - "127.0.0.1:9181:9181"
+ clickhouse-keeper-02:
+ image: "clickhouse/clickhouse-keeper:latest-alpine"
+ user: "101:101"
+ container_name: clickhouse-keeper-02
+ hostname: clickhouse-keeper-02
+ volumes:
+ - ${PWD}/fs/volumes/clickhouse-keeper-02/etc/clickhouse-keeper/keeper_config.xml:/etc/clickhouse-keeper/keeper_config.xml
+ ports:
+ - "127.0.0.1:9182:9181"
+ clickhouse-keeper-03:
+ image: "clickhouse/clickhouse-keeper:latest-alpine"
+ user: "101:101"
+ container_name: clickhouse-keeper-03
+ hostname: clickhouse-keeper-03
+ volumes:
+ - ${PWD}/fs/volumes/clickhouse-keeper-03/etc/clickhouse-keeper/keeper_config.xml:/etc/clickhouse-keeper/keeper_config.xml
+ ports:
+ - "127.0.0.1:9183:9181"
```
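+
+Before starting anything, you can optionally ask Docker Compose to parse the file and
+list the services it defines as a quick sanity check. Depending on your Docker
+installation, the command may be `docker-compose` rather than `docker compose`:
+
+```bash
+docker compose config --services
+```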
-Creating a distributed table provides a view into the local tables of the cluster:
+Create the following sub-directories and files:
-```sql
-CREATE TABLE tutorial.hits_all AS tutorial.hits_local
-ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand());
+```bash
+for i in {01..04}; do
+ mkdir -p fs/volumes/clickhouse-${i}/etc/clickhouse-server/config.d
+ mkdir -p fs/volumes/clickhouse-${i}/etc/clickhouse-server/users.d
+ touch fs/volumes/clickhouse-${i}/etc/clickhouse-server/config.d/config.xml
+ touch fs/volumes/clickhouse-${i}/etc/clickhouse-server/users.d/users.xml
+done
```
-A common practice is to create similar distributed tables on all machines of the cluster. This allows running distributed queries on any machine of the cluster. There's also an alternative option to create a temporary distributed table for a given SELECT query using [remote](../sql-reference/table-functions/remote.md) table function.
+- The `config.d` directory contains the ClickHouse server configuration file `config.xml`,
+in which custom configuration for each ClickHouse node is defined. This
+configuration is combined with the default `config.xml` ClickHouse configuration
+file that ships with every ClickHouse installation.
+- The `users.d` directory contains the user configuration file `users.xml`, in which
+custom configuration for users is defined. This configuration is combined with
+the default ClickHouse `users.xml` configuration file that ships with every
+ClickHouse installation.
-Let's run [INSERT SELECT](../sql-reference/statements/insert-into.md) into the distributed table to spread the table to multiple servers.
+
-```sql
-INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1;
-```
+:::tip Custom configuration directories
+It is best practice to make use of the `config.d` and `users.d` directories when
+writing your own configuration, rather than directly modifying the default configuration
+in `/etc/clickhouse-server/config.xml` and `/etc/clickhouse-server/users.xml`.
-As you would expect, computationally heavy queries run N times faster if they utilize 3 servers instead of one.
+The line
-In this case, we use a cluster with 3 shards, and each shard contains a single replica.
+```xml
+<clickhouse replace="true">
+```
+
+ensures that the configuration sections defined in the `config.d` and `users.d`
+directories override the default configuration sections defined in the default
+`config.xml` and `users.xml` files.
+:::
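+
+If you later want to see the configuration that results from merging the defaults with
+your overrides, you can inspect the preprocessed configuration which the server writes
+out at startup. This is an optional check and assumes the containers are already
+running (see the test section further below):
+
+```bash
+docker exec -it clickhouse-01 cat /var/lib/clickhouse/preprocessed_configs/config.xml
+```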
-To provide resilience in a production environment, we recommend that each shard contain 2-3 replicas spread between multiple availability zones or datacenters (or at least racks). Note that ClickHouse supports an unlimited number of replicas.
+## Configure ClickHouse nodes {#configure-clickhouse-servers}
-Here is an example config for a cluster of one shard containing three replicas:
+Now modify each empty configuration file `config.xml` located at
+`fs/volumes/clickhouse-{}/etc/clickhouse-server/config.d`. The highlighted
+lines below need to be changed to be specific to each node:
```xml
-<remote_servers>
-    ...
-    <perftest_1shards_3replicas>
-        <shard>
-            <replica>
-                <host>example-perftest01j.clickhouse.com</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>example-perftest02j.clickhouse.com</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>example-perftest03j.clickhouse.com</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-    </perftest_1shards_3replicas>
-</remote_servers>
+<clickhouse replace="true">
+    <logger>
+        <level>debug</level>
+        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
+        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
+        <size>1000M</size>
+        <count>3</count>
+    </logger>
+    <!-- highlight-next-line -->
+    <display_name>cluster_2S_2R node 1</display_name>
+    <listen_host>0.0.0.0</listen_host>
+    <http_port>8123</http_port>
+    <tcp_port>9000</tcp_port>
+    <user_directories>
+        <users_xml>
+            <path>users.xml</path>
+        </users_xml>
+        <local_directory>
+            <path>/var/lib/clickhouse/access/</path>
+        </local_directory>
+    </user_directories>
+    <distributed_ddl>
+        <path>/clickhouse/task_queue/ddl</path>
+    </distributed_ddl>
+    <remote_servers>
+        <cluster_2S_2R>
+            <shard>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>clickhouse-01</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>clickhouse-03</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+            <shard>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>clickhouse-02</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>clickhouse-04</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </cluster_2S_2R>
+    </remote_servers>
+    <zookeeper>
+        <node>
+            <host>clickhouse-keeper-01</host>
+            <port>9181</port>
+        </node>
+        <node>
+            <host>clickhouse-keeper-02</host>
+            <port>9181</port>
+        </node>
+        <node>
+            <host>clickhouse-keeper-03</host>
+            <port>9181</port>
+        </node>
+    </zookeeper>
+    <macros>
+        <shard>01</shard>
+        <replica>01</replica>
+    </macros>
+</clickhouse>
```
-To enable native replication, [ZooKeeper](http://zookeeper.apache.org/) is required. ClickHouse takes care of data consistency on all replicas and runs a restore procedure after a failure automatically. It's recommended to deploy the ZooKeeper cluster on separate servers (where no other processes, including ClickHouse, are running).
+| Directory | File |
+|-----------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `fs/volumes/clickhouse-01/etc/clickhouse-server/config.d` | [`config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_2S_2R/fs/volumes/clickhouse-01/etc/clickhouse-server/config.d/config.xml) |
+| `fs/volumes/clickhouse-02/etc/clickhouse-server/config.d` | [`config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_2S_2R/fs/volumes/clickhouse-02/etc/clickhouse-server/config.d/config.xml) |
+| `fs/volumes/clickhouse-03/etc/clickhouse-server/config.d` | [`config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_2S_2R/fs/volumes/clickhouse-03/etc/clickhouse-server/config.d/config.xml) |
+| `fs/volumes/clickhouse-04/etc/clickhouse-server/config.d` | [`config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_2S_2R/fs/volumes/clickhouse-04/etc/clickhouse-server/config.d/config.xml) |
+
+### Configuration explanation {#configuration-explanation}
-:::note Note
-ZooKeeper is not a strict requirement: in some simple cases, you can duplicate the data by writing it into all the replicas from your application code. This approach is **not** recommended, as in this case, ClickHouse won't be able to guarantee data consistency on all replicas. Thus, it becomes the responsibility of your application.
-:::
+Note that each node in the cluster has the same cluster configuration defined by the
+`<remote_servers>` section.
-ZooKeeper locations are specified in the configuration file:
+
+Now modify each empty configuration file `users.xml` located at
+`fs/volumes/clickhouse-{}/etc/clickhouse-server/users.d` with the following:
```xml
-<zookeeper>
-    <node>
-        <host>zoo01.clickhouse.com</host>
-        <port>2181</port>
-    </node>
-    <node>
-        <host>zoo02.clickhouse.com</host>
-        <port>2181</port>
-    </node>
-    <node>
-        <host>zoo03.clickhouse.com</host>
-        <port>2181</port>
-    </node>
-</zookeeper>
+<?xml version="1.0"?>
+<clickhouse replace="true">
+    <profiles>
+        <default>
+            <max_memory_usage>10000000000</max_memory_usage>
+            <use_uncompressed_cache>0</use_uncompressed_cache>
+            <load_balancing>in_order</load_balancing>
+            <log_queries>1</log_queries>
+        </default>
+    </profiles>
+    <users>
+        <default>
+            <access_management>1</access_management>
+            <profile>default</profile>
+            <networks>
+                <ip>::/0</ip>
+            </networks>
+            <quota>default</quota>
+            <access_management>1</access_management>
+            <named_collection_control>1</named_collection_control>
+            <show_named_collections>1</show_named_collections>
+            <show_named_collections_secrets>1</show_named_collections_secrets>
+        </default>
+    </users>
+    <quotas>
+        <default>
+            <interval>
+                <duration>3600</duration>
+                <queries>0</queries>
+                <errors>0</errors>
+                <result_rows>0</result_rows>
+                <read_rows>0</read_rows>
+                <execution_time>0</execution_time>
+            </interval>
+        </default>
+    </quotas>
+</clickhouse>
```
-Also, we need to set macros for identifying each shard and replica which are used on table creation:
+:::note
+Each `users.xml` file is identical for all nodes in the cluster.
+:::
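+
+Once the cluster is up and running (see the test section further below), you can
+optionally confirm that the profile settings from `users.xml` were applied by querying
+`system.settings` from any node as the `default` user:
+
+```sql title="Query"
+SELECT name, value
+FROM system.settings
+WHERE name IN ('max_memory_usage', 'load_balancing', 'log_queries');
+```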
+
+## Configure ClickHouse Keeper nodes {#configure-clickhouse-keeper-nodes}
+
+Finally, create the `keeper_config.xml` files for each ClickHouse Keeper node
+using the following command:
+
+```bash
+for i in {01..03}; do
+ touch fs/volumes/clickhouse-keeper-${i}/etc/clickhouse-keeper/keeper_config.xml
+done
+```
+
+Modify the empty `keeper_config.xml` configuration file in each
+node directory `fs/volumes/clickhouse-keeper-{}/etc/clickhouse-keeper`. The
+highlighted lines below need to be changed to be specific to each node:
```xml
-<macros>
-    <shard>01</shard>
-    <replica>01</replica>
-</macros>
+<clickhouse replace="true">
+    <logger>
+        <level>information</level>
+        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
+        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
+        <size>1000M</size>
+        <count>3</count>
+    </logger>
+    <listen_host>0.0.0.0</listen_host>
+    <keeper_server>
+        <tcp_port>9181</tcp_port>
+        <!-- highlight-next-line -->
+        <server_id>1</server_id>
+        <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
+        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
+        <coordination_settings>
+            <operation_timeout_ms>10000</operation_timeout_ms>
+            <session_timeout_ms>30000</session_timeout_ms>
+            <raft_logs_level>information</raft_logs_level>
+        </coordination_settings>
+        <raft_configuration>
+            <server>
+                <id>1</id>
+                <hostname>clickhouse-keeper-01</hostname>
+                <port>9234</port>
+            </server>
+            <server>
+                <id>2</id>
+                <hostname>clickhouse-keeper-02</hostname>
+                <port>9234</port>
+            </server>
+            <server>
+                <id>3</id>
+                <hostname>clickhouse-keeper-03</hostname>
+                <port>9234</port>
+            </server>
+        </raft_configuration>
+    </keeper_server>
+</clickhouse>
+```
+
+### Configuration explanation {#configuration-explanation}
+
+
+
+## Test the setup {#test-the-setup}
+
+Make sure that Docker is running on your machine.
+Start the cluster using the `docker-compose up` command from the `clickhouse-cluster` directory:
+
+```bash
+docker-compose up -d
+```
+
+You should see Docker begin to pull the ClickHouse and ClickHouse Keeper images,
+and then start the containers:
+
+```bash
+[+] Running 8/8
+ ✔ Network clickhouse-cluster_default Created
+ ✔ Container clickhouse-keeper-03 Started
+ ✔ Container clickhouse-keeper-02 Started
+ ✔ Container clickhouse-keeper-01 Started
+ ✔ Container clickhouse-01 Started
+ ✔ Container clickhouse-02 Started
+ ✔ Container clickhouse-04 Started
+ ✔ Container clickhouse-03 Started
```
-If there are no replicas at the moment of replicated table creation, a new first replica is instantiated. If there are already live replicas, the new replica clones data from existing ones. You have an option to create all replicated tables first, and then insert data to it. Another option is to create some replicas and add the others after or during data insertion.
+To verify that the cluster is running, connect to any one of the nodes and run the
+following query.
+For the sake of this example, the command to connect to the
+first node is shown:
-```sql
-CREATE TABLE tutorial.hits_replica (...)
-ENGINE = ReplicatedMergeTree(
- '/clickhouse_perftest/tables/{shard}/hits',
- '{replica}'
-)
-...
+```bash
+# Connect to any node
+docker exec -it clickhouse-01 clickhouse-client
```
-Here we use the [ReplicatedMergeTree](../engines/table-engines/mergetree-family/replication.md) table engine. In parameters, we specify the ZooKeeper path containing the shard and replica identifiers.
+If successful, you will see the ClickHouse client prompt:
-```sql
-INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local;
+```response
+cluster_2S_2R node 1 :)
```
-Replication operates in multi-master mode. Data can be loaded into any replica, and the system then syncs it with other instances automatically. Replication is asynchronous so at a given moment, not all replicas may contain recently inserted data. At least one replica should be up to allow for data ingestion. Others will sync up data and repair consistency once they become active again. Note that this approach allows for the low possibility of loss of recently inserted data.
+
\ No newline at end of file
From 96359753a6db07c9a0e64874a19b0e32fb05b152 Mon Sep 17 00:00:00 2001
From: Shaun Struwig <41984034+Blargian@users.noreply.github.com>
Date: Tue, 24 Jun 2025 19:43:08 +0200
Subject: [PATCH 02/12] add steps for setting up distributed table
---
docs/architecture/cluster-deployment.md | 408 +++++++++++++++++++++++-
src/css/custom.scss | 23 ++
2 files changed, 418 insertions(+), 13 deletions(-)
diff --git a/docs/architecture/cluster-deployment.md b/docs/architecture/cluster-deployment.md
index 74f7bd455fd..b41c570c52d 100644
--- a/docs/architecture/cluster-deployment.md
+++ b/docs/architecture/cluster-deployment.md
@@ -6,20 +6,19 @@ title: 'Cluster Deployment'
description: 'By going through this tutorial, you will learn how to set up a simple ClickHouse cluster.'
---
+> In this tutorial, you'll learn how to set up a simple ClickHouse cluster
+consisting of two shards and two replicas, with a 3-node ClickHouse Keeper cluster
+to manage coordination and maintain quorum in the cluster.
+
## Prerequisites {#prerequisites}
- You've already set up a [local ClickHouse server](../getting-started/install/install.mdx)
-- You are familiar with basic configuration concepts of ClickHouse
+- You are familiar with basic configuration concepts of ClickHouse such as [configuration files](/operations/configuration-files)
+- You have Docker installed on your machine
-In this tutorial you'll learn how to set up a simple ClickHouse cluster,
-consisting of two shards and two replicas with 3 dedicated ClickHouse Keepers
-for managing coordination and keeping quorum in the cluster.
-
The architecture of the cluster we will be setting up is shown below:
-
## Set up directory structure and test environment {#set-up}
@@ -47,7 +46,7 @@ done
Add the following `docker-compose.yml` file to the `clickhouse-cluster` directory:
-```yaml
+```yaml title="docker-compose.yml"
version: '3.8'
services:
clickhouse-01:
@@ -262,16 +261,90 @@ highlighted below need to be changed to be specific to each node:
| `fs/volumes/clickhouse-03/etc/clickhouse-server/config.d` | [`config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_2S_2R/fs/volumes/clickhouse-03/etc/clickhouse-server/config.d/config.xml) |
| `fs/volumes/clickhouse-04/etc/clickhouse-server/config.d` | [`config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_2S_2R/fs/volumes/clickhouse-04/etc/clickhouse-server/config.d/config.xml) |
-### Configuration explanation {#configuration-explanation}
+### Configuration explanation {#configuration-explanation-clickhouse}
+
+External communication over the network interface is enabled by activating the
+`listen_host` setting. This ensures that the ClickHouse server host is reachable by
+other hosts:
+
+```xml
+<listen_host>0.0.0.0</listen_host>
+```
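+
+Because each server's HTTP port is also published to localhost in `docker-compose.yml`,
+you can optionally verify that a node is reachable once it is running by pinging its
+HTTP interface, which responds with `Ok.`:
+
+```bash
+curl http://localhost:8123
+```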
+
+Note that each node in the cluster gets the same cluster configuration, defined by the
+`<remote_servers>` section:
+
+```xml
+<remote_servers>
+    <cluster_2S_2R>
+        <shard>
+            <internal_replication>true</internal_replication>
+            <replica>
+                <host>clickhouse-01</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>clickhouse-03</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+        <shard>
+            <internal_replication>true</internal_replication>
+            <replica>
+                <host>clickhouse-02</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>clickhouse-04</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+    </cluster_2S_2R>
+</remote_servers>
+```
+
+The `<remote_servers>` section defines the layout of the cluster
+and acts as a template for distributed DDL queries, which are queries that execute
+across the cluster using the `ON CLUSTER` clause.
-Note that each node in the cluster has the same cluster configuration defined by the
-`<remote_servers>` section.
+The `<zookeeper>` section tells ClickHouse where ClickHouse Keeper (or ZooKeeper) is running.
+As we are using a ClickHouse Keeper cluster, each `<node>` of the cluster needs to be specified,
+along with its hostname and port number, using the `<host>` and `<port>` tags respectively.
+The setup of ClickHouse Keeper is explained in the next step of the tutorial.
+
+```xml
+
+
+ clickhouse-keeper-01
+ 9181
+
+
+ clickhouse-keeper-02
+ 9181
+
+
+ clickhouse-keeper-03
+ 9181
+
+
+```
+
+Additionally, the `<macros>` section is used to define parameter substitutions for
+replicated tables. These are listed in `system.macros` and allow substitutions
+such as `{shard}` and `{replica}` to be used in queries.
+
+```xml
+<macros>
+    <shard>01</shard>
+    <replica>01</replica>
+</macros>
+```
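+
+Once the cluster is running, you can optionally check which substitutions a given node
+will use by querying the `system.macros` table from that node's client:
+
+```sql title="Query"
+SELECT macro, substitution FROM system.macros;
+```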
Now modify each empty configuration file `users.xml` located at
`fs/volumes/clickhouse-{}/etc/clickhouse-server/users.d` with the following:
-```xml
+```xml title="/users.d/users.xml"
@@ -317,7 +390,20 @@ Each `users.xml` file is identical for all nodes in the cluster.
## Configure ClickHouse Keeper nodes {#configure-clickhouse-keeper-nodes}
-Finally, create the `keeper_config.xml` files for each ClickHouse Keeper node
+In order for replication to work, a ClickHouse Keeper cluster needs to be set up and
+configured. ClickHouse Keeper provides the coordination system for data replication,
+acting as a drop-in replacement for ZooKeeper, which could also be used.
+ClickHouse Keeper is, however, recommended, as it provides better guarantees and
+reliability and uses fewer resources than ZooKeeper. For high availability, and in
+order to maintain quorum, it is recommended to run at least three ClickHouse Keeper nodes.
+
+:::note
+ClickHouse Keeper can run on any node of the cluster alongside ClickHouse, although
+it is recommended to run it on a dedicated node, which allows you to scale and
+manage the ClickHouse Keeper cluster independently of the database cluster.
+:::
+
+Create the `keeper_config.xml` files for each ClickHouse Keeper node
using the following command:
```bash
@@ -330,7 +416,7 @@ Modify these empty configuration files called `keeper_config.xml` in each
node directory `fs/volumes/clickhouse-keeper-{}/etc/clickhouse-keeper`. The
highlighted lines below need to be changed to be specific to each node:
-```xml
+```xml title="/config.d/config.xml"
information
@@ -374,7 +460,40 @@ highlighted lines below need to be changed to be specific to each node:
### Configuration explanation {#configuration-explanation}
+Each configuration file will contain the following unique configuration.
+The `server_id` used should be unique for that particular ClickHouse Keeper node
+in the cluster and match the server `<id>` defined in the `<raft_configuration>` section.
+The `tcp_port` is the port used by _clients_ of ClickHouse Keeper.
+
+```xml
+<tcp_port>9181</tcp_port>
+<server_id>{id}</server_id>
+```
+
+The following section is used to configure the servers that participate in the
+quorum for the [Raft consensus algorithm](https://en.wikipedia.org/wiki/Raft_(algorithm)):
+```xml
+<raft_configuration>
+    <server>
+        <id>1</id>
+        <hostname>clickhouse-keeper-01</hostname>
+        <port>9234</port>
+    </server>
+    <server>
+        <id>2</id>
+        <hostname>clickhouse-keeper-02</hostname>
+        <port>9234</port>
+    </server>
+    <server>
+        <id>3</id>
+        <hostname>clickhouse-keeper-03</hostname>
+        <port>9234</port>
+    </server>
+</raft_configuration>
+```
## Test the setup {#test-the-setup}
@@ -416,4 +535,267 @@ If successful, you will see the ClickHouse client prompt:
cluster_2S_2R node 1 :)
```
+Run the following query to check which cluster topologies are defined and which
+hosts they include:
+
+```sql title="Query"
+SELECT
+ cluster,
+ shard_num,
+ replica_num,
+ host_name,
+ port
+FROM system.clusters;
+```
+
+```response title="Response"
+ ┌─cluster───────┬─shard_num─┬─replica_num─┬─host_name─────┬─port─┐
+1. │ cluster_2S_2R │ 1 │ 1 │ clickhouse-01 │ 9000 │
+2. │ cluster_2S_2R │ 1 │ 2 │ clickhouse-03 │ 9000 │
+3. │ cluster_2S_2R │ 2 │ 1 │ clickhouse-02 │ 9000 │
+4. │ cluster_2S_2R │ 2 │ 2 │ clickhouse-04 │ 9000 │
+5. │ default │ 1 │ 1 │ localhost │ 9000 │
+ └───────────────┴───────────┴─────────────┴───────────────┴──────┘
+```
+
+Run the following query to check the status of the ClickHouse Keeper cluster:
+
+```sql title="Query"
+SELECT *
+FROM system.zookeeper
+WHERE path IN ('/', '/clickhouse')
+```
+
+```response title="Response"
+ ┌─name───────┬─value─┬─path────────┐
+1. │ task_queue │ │ /clickhouse │
+2. │ sessions │ │ /clickhouse │
+3. │ keeper │ │ / │
+4. │ clickhouse │ │ / │
+ └────────────┴───────┴─────────────┘
+```
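+
+If you want to check the state of an individual Keeper node directly, you can send it
+the `mntr` four-letter command on its published port (9181, 9182 or 9183 on localhost).
+This assumes `nc` (netcat) is installed on your machine; among other statistics, the
+output includes `zk_server_state`, which shows whether that node is currently the
+leader or a follower:
+
+```bash
+echo mntr | nc localhost 9181
+```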
+
+With this, you have successfully set up a ClickHouse cluster with two shards and two replicas.
+In the next steps, you will create a database and tables on the cluster.
+
+## Creating a database on the cluster {#creating-a-database}
+
+In this tutorial, you will recreate the same table as the one used in the
+[UK property prices](/getting-started/example-datasets/uk-price-paid) example dataset tutorial.
+It consists of around 30 million rows of prices paid for real-estate property in England and Wales
+since 1995.
+
+Start a client on each host by running each of the following commands in separate terminal
+tabs or windows:
+
+```bash
+docker exec -it clickhouse-01 clickhouse-client
+docker exec -it clickhouse-02 clickhouse-client
+docker exec -it clickhouse-03 clickhouse-client
+docker exec -it clickhouse-04 clickhouse-client
+```
+
+You can run the query below from the client of each host to confirm that there are no databases created yet,
+apart from the default ones:
+
+```sql title="Query"
+SHOW DATABASES;
+```
+
+```response title="Response"
+ ┌─name───────────────┐
+1. │ INFORMATION_SCHEMA │
+2. │ default │
+3. │ information_schema │
+4. │ system │
+ └────────────────────┘
+```
+
+From the `clickhouse-01` client run the following **distributed** DDL query using the `ON CLUSTER` clause to create a
+database:
+
+```sql
+CREATE DATABASE IF NOT EXISTS uk
+-- highlight-next-line
+ON CLUSTER cluster_2S_2R;
+```
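+
+Distributed DDL statements like this one are executed on each host through the DDL
+queue configured earlier under `<distributed_ddl>`. If you are curious, you can
+optionally see how the statement was processed on each host by querying
+`system.distributed_ddl_queue`:
+
+```sql title="Query"
+SELECT cluster, query, host, status
+FROM system.distributed_ddl_queue
+ORDER BY entry DESC
+LIMIT 8;
+```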
+
+You can again run the same query as before from the client of each host
+to confirm that the database has been created across the cluster despite running
+the query only from `clickhouse-01`:
+
+```sql
+SHOW DATABASES;
+```
+
+```response
+ ┌─name───────────────┐
+1. │ INFORMATION_SCHEMA │
+2. │ default │
+3. │ information_schema │
+4. │ system │
+#highlight-next-line
+5. │ uk │
+ └────────────────────┘
+```
+
+## Creating a table using ON CLUSTER {#creating-a-table}
+
+Now that the database has been created, create a replicated table on the cluster.
+Run the following query from any of the host clients:
+
+```sql
+CREATE TABLE IF NOT EXISTS uk.uk_price_paid_local
+--highlight-next-line
+ON CLUSTER cluster_2S_2R
+(
+ price UInt32,
+ date Date,
+ postcode1 LowCardinality(String),
+ postcode2 LowCardinality(String),
+ type Enum8('terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4, 'other' = 0),
+ is_new UInt8,
+ duration Enum8('freehold' = 1, 'leasehold' = 2, 'unknown' = 0),
+ addr1 String,
+ addr2 String,
+ street LowCardinality(String),
+ locality LowCardinality(String),
+ town LowCardinality(String),
+ district LowCardinality(String),
+ county LowCardinality(String)
+)
+--highlight-next-line
+ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/{table}/{shard}', '{replica}')
+ORDER BY (postcode1, postcode2, addr1, addr2);
+```
+
+Notice that this is identical to the original `CREATE` statement used in the
+[UK property prices](/getting-started/example-datasets/uk-price-paid) example dataset tutorial,
+except for the `ON CLUSTER` clause and the `ReplicatedMergeTree` engine.
+
+The `ON CLUSTER` clause is designed for distributed execution of DDL (Data Definition Language)
+queries such as `CREATE`, `DROP`, `ALTER`, and `RENAME`, ensuring that these
+schema changes are applied across all nodes in a cluster.
+
+The [`ReplicatedMergeTree`](https://clickhouse.com/docs/engines/table-engines/mergetree-family/replication#converting-from-mergetree-to-replicatedmergetree)
+engine works just like the ordinary `MergeTree` table engine, but it also replicates the data. It requires two parameters to be specified:
+
+- `zoo_path`: The Keeper/ZooKeeper path to the table's metadata.
+- `replica_name`: The table's replica name.
+
+
+
+The `zoo_path` parameter can be set to anything you choose, although it is recommended to follow
+the convention of using the prefix
+
+```
+/clickhouse/tables/{shard}/{database}/{table}
+```
+
+where:
+- `{database}` and `{table}` will be replaced automatically.
+- `{shard}` and `{replica}` are macros which were [defined](#configuration-explanation-clickhouse)
+ previously in the `config.xml` file of each ClickHouse node.
+
+You can run the query below from each host's client to confirm that the table has been created across the cluster:
+
+```sql title="Query"
+SHOW TABLES IN uk;
+```
+
+```response title="Response"
+ ┌─name────────────────┐
+1. │ uk_price_paid_local │
+ └─────────────────────┘
+```
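+
+You can also optionally check how the `{shard}` and `{replica}` macros were expanded
+into the actual Keeper path for this table on each node by querying `system.replicas`
+from that node's client:
+
+```sql title="Query"
+SELECT database, table, zookeeper_path, replica_name
+FROM system.replicas
+WHERE table = 'uk_price_paid_local';
+```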
+
+## Insert data using a distributed table {#inserting-data-using-distributed}
+
+To insert data into the tables across the cluster, `ON CLUSTER` cannot be used, as it does
+not apply to DML (Data Manipulation Language) queries such as `INSERT`, `UPDATE`,
+and `DELETE`. To insert data, it is necessary to make use of the
+[`Distributed`](/engines/table-engines/special/distributed) table engine.
+
+From any of the host clients, run the following query to create a distributed table
+using the existing table we created previously with `ON CLUSTER` and
+`ReplicatedMergeTree`:
+
+```sql
+CREATE TABLE IF NOT EXISTS uk.uk_price_paid_distributed
+ON CLUSTER cluster_2S_2R
+ENGINE = Distributed('cluster_2S_2R', 'uk', 'uk_price_paid_local', rand());
+```
+
+On each host you will now see the following tables in the `uk` database:
+
+```response
+ ┌─name──────────────────────┐
+1. │ uk_price_paid_distributed │
+2. │ uk_price_paid_local │
+ └───────────────────────────┘
+```
+
+Data can be inserted into the `uk_price_paid_distributed` table from any of the
+host clients using the following query:
+
+```sql
+INSERT INTO uk.uk_price_paid_distributed
+SELECT
+ toUInt32(price_string) AS price,
+ parseDateTimeBestEffortUS(time) AS date,
+ splitByChar(' ', postcode)[1] AS postcode1,
+ splitByChar(' ', postcode)[2] AS postcode2,
+ transform(a, ['T', 'S', 'D', 'F', 'O'], ['terraced', 'semi-detached', 'detached', 'flat', 'other']) AS type,
+ b = 'Y' AS is_new,
+ transform(c, ['F', 'L', 'U'], ['freehold', 'leasehold', 'unknown']) AS duration,
+ addr1,
+ addr2,
+ street,
+ locality,
+ town,
+ district,
+ county
+FROM url(
+ 'http://prod1.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-complete.csv',
+ 'CSV',
+ 'uuid_string String,
+ price_string String,
+ time String,
+ postcode String,
+ a String,
+ b String,
+ c String,
+ addr1 String,
+ addr2 String,
+ street String,
+ locality String,
+ town String,
+ district String,
+ county String,
+ d String,
+ e String'
+) SETTINGS max_http_get_redirects=10;
+```
+
+Run the following queries to confirm that the inserted data has been evenly distributed
+across the nodes of the cluster:
+
+```sql
+SELECT count(*)
+FROM uk.uk_price_paid_distributed;
+
+SELECT count(*) FROM uk.uk_price_paid_local;
+```
+
+```response
+ ┌──count()─┐
+1. │ 30212555 │ -- 30.21 million
+ └──────────┘
+
+ ┌──count()─┐
+1. │ 15105983 │ -- 15.11 million
+ └──────────┘
+```
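+
+As a final optional check, you can group the distributed count by the host that served
+each part of the query. One replica per shard answers the query, so you should see two
+hosts, each holding roughly half of the rows:
+
+```sql title="Query"
+SELECT hostName() AS host, count() AS rows
+FROM uk.uk_price_paid_distributed
+GROUP BY host;
+```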
+
\ No newline at end of file
diff --git a/src/css/custom.scss b/src/css/custom.scss
index 21ef6708b10..58f8e7c3ca2 100644
--- a/src/css/custom.scss
+++ b/src/css/custom.scss
@@ -17,6 +17,29 @@ html[data-theme="dark"] {
background-attachment: fixed;
}
+/* This is a fix for the issue where paragraphs have 1rem margin bottom but
+look strange when there is a code block after the paragraph
+*/
+p {
+ margin-bottom: 0rem;
+}
+
+/* We only want to have a gap between successive paragraph elements,
+not between paragraphs and other elements like code blocks
+*/
+p + * {
+ margin-top: 1rem;
+}
+
+/*
+Fix for the issue where there is no gap between a list and the paragraph
+that follows it
+ */
+ul + p,
+ol + p {
+ margin-top: 1rem;
+}
+
body {
font-size: var(--default-font);
line-height: var(--default-line-height);
From 72831205c27737109437de579ab195b808881728 Mon Sep 17 00:00:00 2001
From: Shaun Struwig <41984034+Blargian@users.noreply.github.com>
Date: Tue, 24 Jun 2025 19:43:57 +0200
Subject: [PATCH 03/12] rename section
---
docs/architecture/cluster-deployment.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/architecture/cluster-deployment.md b/docs/architecture/cluster-deployment.md
index b41c570c52d..66c71d9afb0 100644
--- a/docs/architecture/cluster-deployment.md
+++ b/docs/architecture/cluster-deployment.md
@@ -639,7 +639,7 @@ SHOW DATABASES;
└────────────────────┘
```
-## Creating a table using ON CLUSTER {#creating-a-table}
+## Creating local tables on the cluster {#creating-a-table}
Now that the database has been created, create a distributed table in the cluster.
Run the following query from any of the host clients:
From cf526d6557d9a303d0b90f2821753232553d1c60 Mon Sep 17 00:00:00 2001
From: Shaun Struwig <41984034+Blargian@users.noreply.github.com>
Date: Wed, 25 Jun 2025 17:13:36 +0200
Subject: [PATCH 04/12] restructure and rewrite the replication guide
---
docs/deployment-guides/horizontal-scaling.md | 487 ------------
docs/deployment-guides/replicated.md | 560 -------------
.../01_1_shard_2_replicas.md | 738 ++++++++++++++++++
.../02_2_shards_1_replica.md | 212 +++++
.../03_2_shards_2_replicas.md} | 340 ++++----
.../_snippets/_config_explanation.mdx | 24 +
.../_snippets/_dedicated_keeper_servers.mdx | 8 +
.../_snippets/_keeper_config.mdx | 67 ++
.../_snippets/_keeper_explanation.mdx | 34 +
.../_snippets/_listen_host.mdx | 21 +
.../_snippets/_server_parameter_table.mdx | 6 +
.../_snippets/_verify_keeper_using_mntr.mdx | 70 ++
docs/deployment-guides/terminology.md | 25 +-
sidebars.js | 16 +-
.../architecture_1s_2r_3_nodes.png | Bin 546145 -> 0 bytes
.../replication.png | Bin 0 -> 66359 bytes
16 files changed, 1341 insertions(+), 1267 deletions(-)
delete mode 100644 docs/deployment-guides/horizontal-scaling.md
delete mode 100644 docs/deployment-guides/replicated.md
create mode 100644 docs/deployment-guides/replication-sharding-examples/01_1_shard_2_replicas.md
create mode 100644 docs/deployment-guides/replication-sharding-examples/02_2_shards_1_replica.md
rename docs/{architecture/cluster-deployment.md => deployment-guides/replication-sharding-examples/03_2_shards_2_replicas.md} (70%)
create mode 100644 docs/deployment-guides/replication-sharding-examples/_snippets/_config_explanation.mdx
create mode 100644 docs/deployment-guides/replication-sharding-examples/_snippets/_dedicated_keeper_servers.mdx
create mode 100644 docs/deployment-guides/replication-sharding-examples/_snippets/_keeper_config.mdx
create mode 100644 docs/deployment-guides/replication-sharding-examples/_snippets/_keeper_explanation.mdx
create mode 100644 docs/deployment-guides/replication-sharding-examples/_snippets/_listen_host.mdx
create mode 100644 docs/deployment-guides/replication-sharding-examples/_snippets/_server_parameter_table.mdx
create mode 100644 docs/deployment-guides/replication-sharding-examples/_snippets/_verify_keeper_using_mntr.mdx
delete mode 100644 static/images/deployment-guides/architecture_1s_2r_3_nodes.png
create mode 100644 static/images/deployment-guides/replication-sharding-examples/replication.png
diff --git a/docs/deployment-guides/horizontal-scaling.md b/docs/deployment-guides/horizontal-scaling.md
deleted file mode 100644
index 7215e598799..00000000000
--- a/docs/deployment-guides/horizontal-scaling.md
+++ /dev/null
@@ -1,487 +0,0 @@
----
-slug: /architecture/horizontal-scaling
-sidebar_label: 'Scaling out'
-sidebar_position: 10
-title: 'Scaling out'
-description: 'Page describing an example architecture designed to provide scalability'
----
-
-import Image from '@theme/IdealImage';
-import ReplicationShardingTerminology from '@site/docs/_snippets/_replication-sharding-terminology.md';
-import ConfigFileNote from '@site/docs/_snippets/_config-files.md';
-import scalingOut1 from '@site/static/images/deployment-guides/scaling-out-1.png';
-
-## Description {#description}
-This example architecture is designed to provide scalability. It includes three nodes: two combined ClickHouse plus coordination (ClickHouse Keeper) servers, and a third server with only ClickHouse Keeper to finish the quorum of three. With this example, we'll create a database, table, and a distributed table that will be able to query the data on both of the nodes.
-
-## Level: Basic {#level-basic}
-
-
-
-## Environment {#environment}
-### Architecture Diagram {#architecture-diagram}
-
-
-
-|Node|Description|
-|----|-----------|
-|`chnode1`|Data + ClickHouse Keeper|
-|`chnode2`|Data + ClickHouse Keeper|
-|`chnode3`|Used for ClickHouse Keeper quorum|
-
-:::note
-In production environments we strongly recommend that ClickHouse Keeper runs on dedicated hosts. This basic configuration runs the Keeper functionality within the ClickHouse Server process. The instructions for deploying ClickHouse Keeper standalone are available in the [installation documentation](/getting-started/install/install.mdx).
-:::
-
-## Install {#install}
-
-Install Clickhouse on three servers following the [instructions for your archive type](/getting-started/install/install.mdx) (.deb, .rpm, .tar.gz, etc.). For this example, you will follow the installation instructions for ClickHouse Server and Client on all three machines.
-
-## Editing configuration files {#editing-configuration-files}
-
-
-
-## chnode1 configuration {#chnode1-configuration}
-
-For `chnode1`, there are five configuration files. You may choose to combine these files into a single file, but for clarity in the documentation it may be simpler to look at them separately. As you read through the configuration files, you will see that most of the configuration is the same between `chnode1` and `chnode2`; the differences will be highlighted.
-
-### Network and logging configuration {#network-and-logging-configuration}
-
-These values can be customized as you wish. This example configuration gives you a debug log that will roll over at 1000M three times. ClickHouse will listen on the IPv4 network on ports 8123 and 9000, and will use port 9009 for interserver communication.
-
-```xml title="network-and-logging.xml on chnode1"
-
-
- debug
- /var/log/clickhouse-server/clickhouse-server.log
- /var/log/clickhouse-server/clickhouse-server.err.log
- 1000M
- 3
-
- clickhouse
- 0.0.0.0
- 8123
- 9000
- 9009
-
-```
-
-### ClickHouse Keeper configuration {#clickhouse-keeper-configuration}
-
-ClickHouse Keeper provides the coordination system for data replication and distributed DDL queries execution. ClickHouse Keeper is compatible with Apache ZooKeeper. This configuration enables ClickHouse Keeper on port 9181. The highlighted line specifies that this instance of Keeper has `server_id` of 1. This is the only difference in the `enable-keeper.xml` file across the three servers. `chnode2` will have `server_id` set to `2`, and `chnode3` will have `server_id` set to `3`. The raft configuration section is the same on all three servers, and it is highlighted below to show you the relationship between `server_id` and the `server` instance within the raft configuration.
-
-:::note
-If for any reason a Keeper node is replaced or rebuilt, do not reuse an existing `server_id`. For example, if the Keeper node with `server_id` of `2` is rebuilt, give it server_id of `4` or higher.
-:::
-
-```xml title="enable-keeper.xml on chnode1"
-
-
- 9181
- # highlight-next-line
- 1
- /var/lib/clickhouse/coordination/log
- /var/lib/clickhouse/coordination/snapshots
-
-
- 10000
- 30000
- trace
-
-
-
- # highlight-start
-
- 1
- chnode1
- 9234
-
- # highlight-end
-
- 2
- chnode2
- 9234
-
-
- 3
- chnode3
- 9234
-
-
-
-
-```
-
-### Macros configuration {#macros-configuration}
-
-The macros `shard` and `replica` reduce the complexity of distributed DDL. The values configured are automatically substituted in your DDL queries, which simplifies your DDL. The macros for this configuration specify the shard and replica number for each node.
-In this 2 shard 1 replica example, the replica macro is `replica_1` on both chnode1 and chnode2 as there is only one replica. The shard macro is `1` on chnode1 and `2` on chnode2.
-
-```xml title="macros.xml on chnode1"
-
-
- # highlight-next-line
- 1
- replica_1
-
-
-```
-
-### Replication and sharding configuration {#replication-and-sharding-configuration}
-
-Starting from the top:
-- The `remote_servers` section of the XML specifies each of the clusters in the environment. The attribute `replace=true` replaces the sample `remote_servers` in the default ClickHouse configuration with the `remote_servers` configuration specified in this file. Without this attribute, the remote servers in this file would be appended to the list of samples in the default.
-- In this example, there is one cluster named `cluster_2S_1R`.
-- A secret is created for the cluster named `cluster_2S_1R` with the value `mysecretphrase`. The secret is shared across all of the remote servers in the environment to ensure that the correct servers are joined together.
-- The cluster `cluster_2S_1R` has two shards, and each of those shards has one replica. Take a look at the architecture diagram toward the beginning of this document, and compare it with the two `shard` definitions in the XML below. In each of the shard definitions there is one replica. The replica is for that specific shard. The host and port for that replica is specified. The replica for the first shard in the configuration is stored on `chnode1`, and the replica for the second shard in the configuration is stored on `chnode2`.
-- Internal replication for the shards is set to true. Each shard can have the `internal_replication` parameter defined in the config file. If this parameter is set to true, the write operation selects the first healthy replica and writes data to it.
-
-```xml title="remote-servers.xml on chnode1"
-
-
-
- mysecretphrase
-
- true
-
- chnode1
- 9000
-
-
-
- true
-
- chnode2
- 9000
-
-
-
-
-
-```
-
-### Configuring the use of Keeper {#configuring-the-use-of-keeper}
-
-Up above a few files ClickHouse Keeper was configured. This configuration file `use-keeper.xml` is configuring ClickHouse Server to use ClickHouse Keeper for the coordination of replication and distributed DDL. This file specifies that ClickHouse Server should use Keeper on nodes chnode1 - 3 on port 9181, and the file is the same on `chnode1` and `chnode2`.
-
-```xml title="use-keeper.xml on chnode1"
-
-
-
- chnode1
- 9181
-
-
- chnode2
- 9181
-
-
- chnode3
- 9181
-
-
-
-```
-
-## chnode2 configuration {#chnode2-configuration}
-
-As the configuration is very similar on `chnode1` and `chnode2`, only the differences will be pointed out here.
-
-### Network and logging configuration {#network-and-logging-configuration-1}
-
-```xml title="network-and-logging.xml on chnode2"
-
-
- debug
- /var/log/clickhouse-server/clickhouse-server.log
- /var/log/clickhouse-server/clickhouse-server.err.log
- 1000M
- 3
-
- clickhouse
- 0.0.0.0
- 8123
- 9000
- 9009
-
-```
-
-### ClickHouse Keeper configuration {#clickhouse-keeper-configuration-1}
-
-This file contains one of the two differences between `chnode1` and `chnode2`. In the Keeper configuration the `server_id` is set to `2`.
-
-```xml title="enable-keeper.xml on chnode2"
-
-
- 9181
- # highlight-next-line
- 2
- /var/lib/clickhouse/coordination/log
- /var/lib/clickhouse/coordination/snapshots
-
-
- 10000
- 30000
- trace
-
-
-
-
- 1
- chnode1
- 9234
-
- # highlight-start
-
- 2
- chnode2
- 9234
-
- # highlight-end
-
- 3
- chnode3
- 9234
-
-
-
-
-```
-
-### Macros configuration {#macros-configuration-1}
-
-The macros configuration has one of the differences between `chnode1` and `chnode2`. `shard` is set to `2` on this node.
-
-```xml title="macros.xml on chnode2"
-
-
- # highlight-next-line
- 2
- replica_1
-
-
-```
-
-### Replication and sharding configuration {#replication-and-sharding-configuration-1}
-
-```xml title="remote-servers.xml on chnode2"
-
-
-
- mysecretphrase
-
- true
-
- chnode1
- 9000
-
-
-
- true
-
- chnode2
- 9000
-
-
-
-
-
-```
-
-### Configuring the use of Keeper {#configuring-the-use-of-keeper-1}
-
-```xml title="use-keeper.xml on chnode2"
-
-
-
- chnode1
- 9181
-
-
- chnode2
- 9181
-
-
- chnode3
- 9181
-
-
-
-```
-
-## chnode3 configuration {#chnode3-configuration}
-
-As `chnode3` is not storing data and is only used for ClickHouse Keeper to provide the third node in the quorum, `chnode3` has only two configuration files, one to configure the network and logging, and one to configure ClickHouse Keeper.
-
-### Network and logging configuration {#network-and-logging-configuration-2}
-
-```xml title="network-and-logging.xml on chnode3"
-
-
- debug
- /var/log/clickhouse-server/clickhouse-server.log
- /var/log/clickhouse-server/clickhouse-server.err.log
- 1000M
- 3
-
- clickhouse
- 0.0.0.0
- 8123
- 9000
- 9009
-
-```
-
-### ClickHouse Keeper configuration {#clickhouse-keeper-configuration-2}
-
-```xml title="enable-keeper.xml on chnode3"
-
-
- 9181
- # highlight-next-line
- 3
- /var/lib/clickhouse/coordination/log
- /var/lib/clickhouse/coordination/snapshots
-
-
- 10000
- 30000
- trace
-
-
-
-
- 1
- chnode1
- 9234
-
-
- 2
- chnode2
- 9234
-
- # highlight-start
-
- 3
- chnode3
- 9234
-
- # highlight-end
-
-
-
-```
-
-## Testing {#testing}
-
-1. Connect to `chnode1` and verify that the cluster `cluster_2S_1R` configured above exists
-
-```sql title="Query"
-SHOW CLUSTERS
-```
-
-```response title="Response"
-┌─cluster───────┐
-│ cluster_2S_1R │
-└───────────────┘
-```
-
-2. Create a database on the cluster
-
-```sql title="Query"
-CREATE DATABASE db1 ON CLUSTER cluster_2S_1R
-```
-
-```response title="Response"
-┌─host────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐
-│ chnode2 │ 9000 │ 0 │ │ 1 │ 0 │
-│ chnode1 │ 9000 │ 0 │ │ 0 │ 0 │
-└─────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘
-```
-
-3. Create a table with MergeTree table engine on the cluster.
-:::note
-We do not need to specify parameters on the table engine since these will be automatically defined based on our macros
-:::
-
-```sql title="Query"
-CREATE TABLE db1.table1 ON CLUSTER cluster_2S_1R
-(
- `id` UInt64,
- `column1` String
-)
-ENGINE = MergeTree
-ORDER BY id
-```
-```response title="Response"
-┌─host────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐
-│ chnode1 │ 9000 │ 0 │ │ 1 │ 0 │
-│ chnode2 │ 9000 │ 0 │ │ 0 │ 0 │
-└─────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘
-```
-
-4. Connect to `chnode1` and insert a row
-
-```sql title="Query"
-INSERT INTO db1.table1 (id, column1) VALUES (1, 'abc');
-```
-
-5. Connect to `chnode2` and insert a row
-
-```sql title="Query"
-INSERT INTO db1.table1 (id, column1) VALUES (2, 'def');
-```
-
-6. Connect to either node, `chnode1` or `chnode2` and you will see only the row that was inserted into that table on that node.
-for example, on `chnode2`
-
-```sql title="Query"
-SELECT * FROM db1.table1;
-```
-
-```response title="Response"
-┌─id─┬─column1─┐
-│ 2 │ def │
-└────┴─────────┘
-```
-
-
-7. Create a distributed table to query both shards on both nodes.
-(In this example, the `rand()` function is set as the sharding key so that it randomly distributes each insert)
-
-```sql title="Query"
-CREATE TABLE db1.table1_dist ON CLUSTER cluster_2S_1R
-(
- `id` UInt64,
- `column1` String
-)
-ENGINE = Distributed('cluster_2S_1R', 'db1', 'table1', rand())
-```
-
-```response title="Response"
-┌─host────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐
-│ chnode2 │ 9000 │ 0 │ │ 1 │ 0 │
-│ chnode1 │ 9000 │ 0 │ │ 0 │ 0 │
-└─────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘
-```
-
-8. Connect to either `chnode1` or `chnode2` and query the distributed table to see both rows.
-
-```sql title="Query"
-SELECT * FROM db1.table1_dist;
-```
-
-```response title="Response"
-┌─id─┬─column1─┐
-│ 2 │ def │
-└────┴─────────┘
-┌─id─┬─column1─┐
-│ 1 │ abc │
-└────┴─────────┘
-```
-
-
-## More information about: {#more-information-about}
-
-- The [Distributed Table Engine](/engines/table-engines/special/distributed.md)
-- [ClickHouse Keeper](/guides/sre/keeper/index.md)
diff --git a/docs/deployment-guides/replicated.md b/docs/deployment-guides/replicated.md
deleted file mode 100644
index 2fd2463ea28..00000000000
--- a/docs/deployment-guides/replicated.md
+++ /dev/null
@@ -1,560 +0,0 @@
----
-slug: /architecture/replication
-sidebar_label: 'Replication for fault tolerance'
-sidebar_position: 10
-title: 'Replication for fault tolerance'
-description: 'Page describing an example architecture with five servers configured. Two are used to host copies of the data and the rest are used to coordinate the replication of data'
----
-
-import Image from '@theme/IdealImage';
-import ReplicationShardingTerminology from '@site/docs/_snippets/_replication-sharding-terminology.md';
-import ConfigFileNote from '@site/docs/_snippets/_config-files.md';
-import KeeperConfigFileNote from '@site/docs/_snippets/_keeper-config-files.md';
-import ReplicationArchitecture from '@site/static/images/deployment-guides/architecture_1s_2r_3_nodes.png';
-
-## Description {#description}
-In this architecture, there are five servers configured. Two are used to host copies of the data. The other three servers are used to coordinate the replication of data. With this example, we'll create a database and table that will be replicated across both data nodes using the ReplicatedMergeTree table engine.
-
-## Level: Basic {#level-basic}
-
-
-
-## Environment {#environment}
-### Architecture Diagram {#architecture-diagram}
-
-
-
-|Node|Description|
-|----|-----------|
-|clickhouse-01|Data|
-|clickhouse-02|Data|
-|clickhouse-keeper-01|Distributed coordination|
-|clickhouse-keeper-02|Distributed coordination|
-|clickhouse-keeper-03|Distributed coordination|
-
-:::note
-In production environments, we strongly recommend using *dedicated* hosts for ClickHouse keeper. In test environment it is acceptable to run ClickHouse Server and ClickHouse Keeper combined on the same server. The other basic example, [Scaling out](/deployment-guides/horizontal-scaling.md), uses this method. In this example we present the recommended method of separating Keeper from ClickHouse Server. The Keeper servers can be smaller, 4GB RAM is generally enough for each Keeper server until your ClickHouse Servers grow very large.
-:::
-
-## Install {#install}
-
-Install ClickHouse server and client on the two servers `clickhouse-01` and `clickhouse-02` following the [instructions for your archive type](/getting-started/install/install.mdx) (.deb, .rpm, .tar.gz, etc.).
-
-Install ClickHouse Keeper on the three servers `clickhouse-keeper-01`, `clickhouse-keeper-02` and `clickhouse-keeper-03` following the [instructions for your archive type](/getting-started/install/install.mdx) (.deb, .rpm, .tar.gz, etc.).
-
-## Editing configuration files {#editing-configuration-files}
-
-
-
-## clickhouse-01 configuration {#clickhouse-01-configuration}
-
-For clickhouse-01 there are five configuration files. You may choose to combine these files into a single file, but for clarity in the documentation it may be simpler to look at them separately. As you read through the configuration files you will see that most of the configuration is the same between clickhouse-01 and clickhouse-02; the differences will be highlighted.
-
-### Network and logging configuration {#network-and-logging-configuration}
-
-These values can be customized as you wish. This example configuration gives you:
-- a debug log that will roll over at 1000M three times
-- the name displayed when you connect with `clickhouse-client` is `cluster_1S_2R node 1`
-- ClickHouse will listen on the IPV4 network on ports 8123 and 9000.
-
-```xml title="/etc/clickhouse-server/config.d/network-and-logging.xml on clickhouse-01"
-
-
- debug
- /var/log/clickhouse-server/clickhouse-server.log
- /var/log/clickhouse-server/clickhouse-server.err.log
- 1000M
- 3
-
- cluster_1S_2R node 1
- 0.0.0.0
- 8123
- 9000
-
-```
-
-### Macros configuration {#macros-configuration}
-
-The macros `shard` and `replica` reduce the complexity of distributed DDL. The values configured are automatically substituted in your DDL queries, which simplifies your DDL. The macros for this configuration specify the shard and replica number for each node.
-In this 1 shard 2 replica example, the replica macro is `replica_1` on clickhouse-01 and `replica_2` on clickhouse-02. The shard macro is `1` on both clickhouse-01 and clickhouse-02 as there is only one shard.
-
-```xml title="/etc/clickhouse-server/config.d/macros.xml on clickhouse-01"
-
-
- 01
-
- 01
- cluster_1S_2R
-
-
-```
-
-### Replication and sharding configuration {#replication-and-sharding-configuration}
-
-Starting from the top:
-- The remote_servers section of the XML specifies each of the clusters in the environment. The attribute `replace=true` replaces the sample remote_servers in the default ClickHouse configuration with the remote_server configuration specified in this file. Without this attribute the remote servers in this file would be appended to the list of samples in the default.
-- In this example, there is one cluster named `cluster_1S_2R`.
-- A secret is created for the cluster named `cluster_1S_2R` with the value `mysecretphrase`. The secret is shared across all of the remote servers in the environment to ensure that the correct servers are joined together.
-- The cluster `cluster_1S_2R` has one shard, and two replicas. Take a look at the architecture diagram toward the beginning of this document, and compare it with the `shard` definition in the XML below. The shard definition contains two replicas. The host and port for each replica is specified. One replica is stored on `clickhouse-01`, and the other replica is stored on `clickhouse-02`.
-- Internal replication for the shard is set to true. Each shard can have the internal_replication parameter defined in the config file. If this parameter is set to true, the write operation selects the first healthy replica and writes data to it.
-
-```xml title="/etc/clickhouse-server/config.d/remote-servers.xml on clickhouse-01"
-
-
-
- mysecretphrase
-
- true
-
- clickhouse-01
- 9000
-
-
- clickhouse-02
- 9000
-
-
-
-
-
-```
-
-### Configuring the use of Keeper {#configuring-the-use-of-keeper}
-
-This configuration file `use-keeper.xml` is configuring ClickHouse Server to use ClickHouse Keeper for the coordination of replication and distributed DDL. This file specifies that ClickHouse Server should use Keeper on nodes clickhouse-keeper-01 - 03 on port 9181, and the file is the same on `clickhouse-01` and `clickhouse-02`.
-
-```xml title="/etc/clickhouse-server/config.d/use-keeper.xml on clickhouse-01"
-
-
-
-
- clickhouse-keeper-01
- 9181
-
-
- clickhouse-keeper-02
- 9181
-
-
- clickhouse-keeper-03
- 9181
-
-
-
-```
-
-## clickhouse-02 configuration {#clickhouse-02-configuration}
-
-As the configuration is very similar on clickhouse-01 and clickhouse-02 only the differences will be pointed out here.
-
-### Network and logging configuration {#network-and-logging-configuration-1}
-
-This file is the same on both clickhouse-01 and clickhouse-02, with the exception of `display_name`.
-
-```xml title="/etc/clickhouse-server/config.d/network-and-logging.xml on clickhouse-02"
-
-
- debug
- /var/log/clickhouse-server/clickhouse-server.log
- /var/log/clickhouse-server/clickhouse-server.err.log
- 1000M
- 3
-
-
- cluster_1S_2R node 2
- 0.0.0.0
- 8123
- 9000
-
-```
-
-### Macros configuration {#macros-configuration-1}
-
-The macros configuration is different between clickhouse-01 and clickhouse-02. `replica` is set to `02` on this node.
-
-```xml title="/etc/clickhouse-server/config.d/macros.xml on clickhouse-02"
-
-
- 01
-
- 02
- cluster_1S_2R
-
-
-```
-
-### Replication and sharding configuration {#replication-and-sharding-configuration-1}
-
-This file is the same on both clickhouse-01 and clickhouse-02.
-
-```xml title="/etc/clickhouse-server/config.d/remote-servers.xml on clickhouse-02"
-
-
-
- mysecretphrase
-
- true
-
- clickhouse-01
- 9000
-
-
- clickhouse-02
- 9000
-
-
-
-
-
-```
-
-### Configuring the use of Keeper {#configuring-the-use-of-keeper-1}
-
-This file is the same on both clickhouse-01 and clickhouse-02.
-
-```xml title="/etc/clickhouse-server/config.d/use-keeper.xml on clickhouse-02"
-
-
-
-
- clickhouse-keeper-01
- 9181
-
-
- clickhouse-keeper-02
- 9181
-
-
- clickhouse-keeper-03
- 9181
-
-
-
-```
-
-## clickhouse-keeper-01 configuration {#clickhouse-keeper-01-configuration}
-
-
-
-ClickHouse Keeper provides the coordination system for data replication and distributed DDL queries execution. ClickHouse Keeper is compatible with Apache ZooKeeper. This configuration enables ClickHouse Keeper on port 9181. The highlighted line specifies that this instance of Keeper has server_id of 1. This is the only difference in the `enable-keeper.xml` file across the three servers. `clickhouse-keeper-02` will have `server_id` set to `2`, and `clickhouse-keeper-03` will have `server_id` set to `3`. The raft configuration section is the same on all three servers, it is highlighted below to show you the relationship between `server_id` and the `server` instance within the raft configuration.
-
-:::note
-If for any reason a Keeper node is replaced or rebuilt, do not reuse an existing `server_id`. For example, if the Keeper node with `server_id` of `2` is rebuilt, give it server_id of `4` or higher.
-:::
-
-```xml title="/etc/clickhouse-keeper/keeper_config.xml on clickhouse-keeper-01"
-
-
- trace
- /var/log/clickhouse-keeper/clickhouse-keeper.log
- /var/log/clickhouse-keeper/clickhouse-keeper.err.log
- 1000M
- 3
-
- 0.0.0.0
-
- 9181
-
- 1
- /var/lib/clickhouse/coordination/log
- /var/lib/clickhouse/coordination/snapshots
-
- 10000
- 30000
- trace
-
-
-
-
- 1
- clickhouse-keeper-01
- 9234
-
-
-
- 2
- clickhouse-keeper-02
- 9234
-
-
- 3
- clickhouse-keeper-03
- 9234
-
-
-
-
-```
-
-## clickhouse-keeper-02 configuration {#clickhouse-keeper-02-configuration}
-
-There is only one line difference between `clickhouse-keeper-01` and `clickhouse-keeper-02`. `server_id` is set to `2` on this node.
-
-```xml title="/etc/clickhouse-keeper/keeper_config.xml on clickhouse-keeper-02"
-
-
- trace
- /var/log/clickhouse-keeper/clickhouse-keeper.log
- /var/log/clickhouse-keeper/clickhouse-keeper.err.log
- 1000M
- 3
-
- 0.0.0.0
-
- 9181
-
- 2
- /var/lib/clickhouse/coordination/log
- /var/lib/clickhouse/coordination/snapshots
-
- 10000
- 30000
- trace
-
-
-
- 1
- clickhouse-keeper-01
- 9234
-
-
-
- 2
- clickhouse-keeper-02
- 9234
-
-
-
- 3
- clickhouse-keeper-03
- 9234
-
-
-
-
-```
-
-## clickhouse-keeper-03 configuration {#clickhouse-keeper-03-configuration}
-
-There is only one line difference between `clickhouse-keeper-01` and `clickhouse-keeper-03`. `server_id` is set to `3` on this node.
-
-```xml title="/etc/clickhouse-keeper/keeper_config.xml on clickhouse-keeper-03"
-
-
- trace
- /var/log/clickhouse-keeper/clickhouse-keeper.log
- /var/log/clickhouse-keeper/clickhouse-keeper.err.log
- 1000M
- 3
-
- 0.0.0.0
-
- 9181
-
- 3
- /var/lib/clickhouse/coordination/log
- /var/lib/clickhouse/coordination/snapshots
-
- 10000
- 30000
- trace
-
-
-
- 1
- clickhouse-keeper-01
- 9234
-
-
- 2
- clickhouse-keeper-02
- 9234
-
-
-
- 3
- clickhouse-keeper-03
- 9234
-
-
-
-
-
-```
-
-## Testing {#testing}
-
-To gain experience with ReplicatedMergeTree and ClickHouse Keeper you can run the following commands which will have you:
-- Create a database on the cluster configured above
-- Create a table on the database using the ReplicatedMergeTree table engine
-- Insert data on one node and query it on another node
-- Stop one ClickHouse server node
-- Insert more data on the running node
-- Restart the stopped node
-- Verify that the data is available when querying the restarted node
-
-### Verify that ClickHouse Keeper is running {#verify-that-clickhouse-keeper-is-running}
-
-The `mntr` command is used to verify that the ClickHouse Keeper is running and to get state information about the relationship of the three Keeper nodes. In the configuration used in this example there are three nodes working together. The nodes will elect a leader, and the remaining nodes will be followers. The `mntr` command gives information related to performance, and whether a particular node is a follower or a leader.
-
-:::tip
-You may need to install `netcat` in order to send the `mntr` command to Keeper. Please see the [nmap.org](https://nmap.org/ncat/) page for download information.
-:::
-
-```bash title="run from a shell on clickhouse-keeper-01, clickhouse-keeper-02, and clickhouse-keeper-03"
-echo mntr | nc localhost 9181
-```
-```response title="response from a follower"
-zk_version v23.3.1.2823-testing-46e85357ce2da2a99f56ee83a079e892d7ec3726
-zk_avg_latency 0
-zk_max_latency 0
-zk_min_latency 0
-zk_packets_received 0
-zk_packets_sent 0
-zk_num_alive_connections 0
-zk_outstanding_requests 0
-# highlight-next-line
-zk_server_state follower
-zk_znode_count 6
-zk_watch_count 0
-zk_ephemerals_count 0
-zk_approximate_data_size 1271
-zk_key_arena_size 4096
-zk_latest_snapshot_size 0
-zk_open_file_descriptor_count 46
-zk_max_file_descriptor_count 18446744073709551615
-```
-
-```response title="response from a leader"
-zk_version v23.3.1.2823-testing-46e85357ce2da2a99f56ee83a079e892d7ec3726
-zk_avg_latency 0
-zk_max_latency 0
-zk_min_latency 0
-zk_packets_received 0
-zk_packets_sent 0
-zk_num_alive_connections 0
-zk_outstanding_requests 0
-# highlight-next-line
-zk_server_state leader
-zk_znode_count 6
-zk_watch_count 0
-zk_ephemerals_count 0
-zk_approximate_data_size 1271
-zk_key_arena_size 4096
-zk_latest_snapshot_size 0
-zk_open_file_descriptor_count 48
-zk_max_file_descriptor_count 18446744073709551615
-# highlight-start
-zk_followers 2
-zk_synced_followers 2
-# highlight-end
-```
-
-### Verify ClickHouse cluster functionality {#verify-clickhouse-cluster-functionality}
-
-Connect to node `clickhouse-01` with `clickhouse client` in one shell, and connect to node `clickhouse-02` with `clickhouse client` in another shell.
-
-1. Create a database on the cluster configured above
-
-```sql title="run on either node clickhouse-01 or clickhouse-02"
-CREATE DATABASE db1 ON CLUSTER cluster_1S_2R
-```
-```response
-┌─host──────────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐
-│ clickhouse-02 │ 9000 │ 0 │ │ 1 │ 0 │
-│ clickhouse-01 │ 9000 │ 0 │ │ 0 │ 0 │
-└───────────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘
-```
-
-2. Create a table on the database using the ReplicatedMergeTree table engine
-```sql title="run on either node clickhouse-01 or clickhouse-02"
-CREATE TABLE db1.table1 ON CLUSTER cluster_1S_2R
-(
- `id` UInt64,
- `column1` String
-)
-ENGINE = ReplicatedMergeTree
-ORDER BY id
-```
-```response
-┌─host──────────┬─port─┬─status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐
-│ clickhouse-02 │ 9000 │ 0 │ │ 1 │ 0 │
-│ clickhouse-01 │ 9000 │ 0 │ │ 0 │ 0 │
-└───────────────┴──────┴────────┴───────┴─────────────────────┴──────────────────┘
-```
-3. Insert data on one node and query it on another node
-```sql title="run on node clickhouse-01"
-INSERT INTO db1.table1 (id, column1) VALUES (1, 'abc');
-```
-
-4. Query the table on the node `clickhouse-02`
-```sql title="run on node clickhouse-02"
-SELECT *
-FROM db1.table1
-```
-```response
-┌─id─┬─column1─┐
-│ 1 │ abc │
-└────┴─────────┘
-```
-
-5. Insert data on the other node and query it on the node `clickhouse-01`
-```sql title="run on node clickhouse-02"
-INSERT INTO db1.table1 (id, column1) VALUES (2, 'def');
-```
-
-```sql title="run on node clickhouse-01"
-SELECT *
-FROM db1.table1
-```
-```response
-┌─id─┬─column1─┐
-│ 1 │ abc │
-└────┴─────────┘
-┌─id─┬─column1─┐
-│ 2 │ def │
-└────┴─────────┘
-```
-
-6. Stop one ClickHouse server node
-Stop one of the ClickHouse server nodes by running an operating system command similar to the command used to start the node. If you used `systemctl start` to start the node, then use `systemctl stop` to stop it.
-
-7. Insert more data on the running node
-```sql title="run on the running node"
-INSERT INTO db1.table1 (id, column1) VALUES (3, 'ghi');
-```
-
-Select the data:
-```sql title="run on the running node"
-SELECT *
-FROM db1.table1
-```
-```response
-┌─id─┬─column1─┐
-│ 1 │ abc │
-└────┴─────────┘
-┌─id─┬─column1─┐
-│ 2 │ def │
-└────┴─────────┘
-┌─id─┬─column1─┐
-│ 3 │ ghi │
-└────┴─────────┘
-```
-
-8. Restart the stopped node and select from there also
-
-```sql title="run on the restarted node"
-SELECT *
-FROM db1.table1
-```
-```response
-┌─id─┬─column1─┐
-│ 1 │ abc │
-└────┴─────────┘
-┌─id─┬─column1─┐
-│ 2 │ def │
-└────┴─────────┘
-┌─id─┬─column1─┐
-│ 3 │ ghi │
-└────┴─────────┘
-```
diff --git a/docs/deployment-guides/replication-sharding-examples/01_1_shard_2_replicas.md b/docs/deployment-guides/replication-sharding-examples/01_1_shard_2_replicas.md
new file mode 100644
index 00000000000..ede9e1fc327
--- /dev/null
+++ b/docs/deployment-guides/replication-sharding-examples/01_1_shard_2_replicas.md
@@ -0,0 +1,738 @@
+---
+slug: /architecture/replication
+sidebar_label: 'Replication'
+sidebar_position: 10
+title: 'Replicating data'
+description: 'Page describing an example architecture with five servers configured. Two are used to host copies of the data and the rest are used to coordinate the replication of data'
+---
+
+import Image from '@theme/IdealImage';
+import ReplicationShardingTerminology from '@site/docs/_snippets/_replication-sharding-terminology.md';
+import ConfigFileNote from '@site/docs/_snippets/_config-files.md';
+import KeeperConfigFileNote from '@site/docs/_snippets/_keeper-config-files.md';
+import ReplicationArchitecture from '@site/static/images/deployment-guides/replication-sharding-examples/replication.png';
+import ConfigExplanation from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_config_explanation.mdx';
+import ListenHost from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_listen_host.mdx';
+import ServerParameterTable from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_server_parameter_table.mdx';
+import KeeperConfig from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_keeper_config.mdx';
+import KeeperConfigExplanation from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_keeper_explanation.mdx';
+import VerifyKeeperStatus from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_verify_keeper_using_mntr.mdx';
+import DedicatedKeeperServers from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_dedicated_keeper_servers.mdx';
+
+> In this example, you'll learn how to set up a simple ClickHouse cluster which
+replicates the data. There are five servers configured. Two are used to host
+copies of the data. The other three servers are used to coordinate the replication
+of data. With this example, we'll create a database and table that will be
+replicated across both data nodes using the `ReplicatedMergeTree` table engine.
+
+The architecture of the cluster you will be setting up is shown below:
+
+
+<Image img={ReplicationArchitecture} size="md" alt="Architecture diagram for 1 shard and 2 replicas" />
+
+
+## Prerequisites {#pre-requisites}
+
+- You've set up a [local ClickHouse server](../getting-started/install/install.mdx) before
+- You are familiar with basic configuration concepts of ClickHouse such as [configuration files](/operations/configuration-files)
+- You have docker installed on your machine
+
+
+
+## Set up directory structure and test environment {#set-up}
+
+In this tutorial, you will use [Docker compose](https://docs.docker.com/compose/) to
+set up the ClickHouse cluster. This setup could be modified to work
+for separate local machines, virtual machines or cloud instances as well.
+
+Run the following commands to set up the directory structure for this example:
+
+```bash
+mkdir cluster_1S_2R
+cd cluster_1S_2R
+
+# Create clickhouse-keeper directories
+for i in {01..03}; do
+ mkdir -p fs/volumes/clickhouse-keeper-${i}/etc/clickhouse-keeper
+done
+
+# Create clickhouse-server directories
+for i in {01..02}; do
+ mkdir -p fs/volumes/clickhouse-${i}/etc/clickhouse-server
+done
+```
+
+Add the following `docker-compose.yml` file to the `cluster_1S_2R` directory:
+
+```yaml title="docker-compose.yml"
+version: '3.8'
+services:
+ clickhouse-01:
+ image: "clickhouse/clickhouse-server:latest"
+ user: "101:101"
+ container_name: clickhouse-01
+ hostname: clickhouse-01
+ volumes:
+ - ${PWD}/fs/volumes/clickhouse-01/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
+ - ${PWD}/fs/volumes/clickhouse-01/etc/clickhouse-server/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
+ ports:
+ - "127.0.0.1:8123:8123"
+ - "127.0.0.1:9000:9000"
+ depends_on:
+ - clickhouse-keeper-01
+ - clickhouse-keeper-02
+ - clickhouse-keeper-03
+ clickhouse-02:
+ image: "clickhouse/clickhouse-server:latest"
+ user: "101:101"
+ container_name: clickhouse-02
+ hostname: clickhouse-02
+ volumes:
+ - ${PWD}/fs/volumes/clickhouse-02/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
+ - ${PWD}/fs/volumes/clickhouse-02/etc/clickhouse-server/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
+ ports:
+ - "127.0.0.1:8124:8123"
+ - "127.0.0.1:9001:9000"
+ depends_on:
+ - clickhouse-keeper-01
+ - clickhouse-keeper-02
+ - clickhouse-keeper-03
+ clickhouse-keeper-01:
+ image: "clickhouse/clickhouse-keeper:latest-alpine"
+ user: "101:101"
+ container_name: clickhouse-keeper-01
+ hostname: clickhouse-keeper-01
+ volumes:
+ - ${PWD}/fs/volumes/clickhouse-keeper-01/etc/clickhouse-keeper/keeper_config.xml:/etc/clickhouse-keeper/keeper_config.xml
+ ports:
+ - "127.0.0.1:9181:9181"
+ clickhouse-keeper-02:
+ image: "clickhouse/clickhouse-keeper:latest-alpine"
+ user: "101:101"
+ container_name: clickhouse-keeper-02
+ hostname: clickhouse-keeper-02
+ volumes:
+ - ${PWD}/fs/volumes/clickhouse-keeper-02/etc/clickhouse-keeper/keeper_config.xml:/etc/clickhouse-keeper/keeper_config.xml
+ ports:
+ - "127.0.0.1:9182:9181"
+ clickhouse-keeper-03:
+ image: "clickhouse/clickhouse-keeper:latest-alpine"
+ user: "101:101"
+ container_name: clickhouse-keeper-03
+ hostname: clickhouse-keeper-03
+ volumes:
+ - ${PWD}/fs/volumes/clickhouse-keeper-03/etc/clickhouse-keeper/keeper_config.xml:/etc/clickhouse-keeper/keeper_config.xml
+ ports:
+ - "127.0.0.1:9183:9181"
+```
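+
+Before moving on, you can optionally validate the Compose file. This is a general Docker Compose
+feature rather than anything specific to this guide; it parses `docker-compose.yml` and reports
+syntax or indentation mistakes without starting any containers:
+
+```bash
+# Validate docker-compose.yml; prints nothing if the file is well formed
+docker-compose config --quiet
+```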
+
+Create the following sub-directories and files:
+
+```bash
+for i in {01..02}; do
+ mkdir -p fs/volumes/clickhouse-${i}/etc/clickhouse-server/config.d
+ mkdir -p fs/volumes/clickhouse-${i}/etc/clickhouse-server/users.d
+ touch fs/volumes/clickhouse-${i}/etc/clickhouse-server/config.d/config.xml
+ touch fs/volumes/clickhouse-${i}/etc/clickhouse-server/users.d/users.xml
+done
+```
+
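+If you want to double-check the layout before continuing, a quick `find` (assuming it is
+available in your shell) should list one `config.d` and one `users.d` directory per ClickHouse
+server, plus the three Keeper directories created earlier:
+
+```bash
+# List every directory created so far under fs/
+find fs -type d
+```
+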
+<ConfigExplanation />
+
+## Configure ClickHouse nodes {#configure-clickhouse-servers}
+
+### Server setup {#cluster-configuration}
+
+Now modify each empty configuration file `config.xml` located at
+`fs/volumes/clickhouse-{}/etc/clickhouse-server/config.d`. The lines which are
+highlighted below need to be changed to be specific to each node:
+
+```xml
+<clickhouse>
+    <logger>
+        <level>debug</level>
+        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
+        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
+        <size>1000M</size>
+        <count>3</count>
+    </logger>
+    <!--highlight-next-line-->
+    <display_name>cluster_1S_2R node 1</display_name>
+    <listen_host>0.0.0.0</listen_host>
+    <http_port>8123</http_port>
+    <tcp_port>9000</tcp_port>
+    <user_directories>
+        <users_xml>
+            <path>users.xml</path>
+        </users_xml>
+        <local_directory>
+            <path>/var/lib/clickhouse/access/</path>
+        </local_directory>
+    </user_directories>
+    <distributed_ddl>
+        <path>/clickhouse/task_queue/ddl</path>
+    </distributed_ddl>
+    <remote_servers>
+        <cluster_1S_2R>
+            <shard>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>clickhouse-01</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>clickhouse-02</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </cluster_1S_2R>
+    </remote_servers>
+    <zookeeper>
+        <node>
+            <host>clickhouse-keeper-01</host>
+            <port>9181</port>
+        </node>
+        <node>
+            <host>clickhouse-keeper-02</host>
+            <port>9181</port>
+        </node>
+        <node>
+            <host>clickhouse-keeper-03</host>
+            <port>9181</port>
+        </node>
+    </zookeeper>
+    <macros>
+        <shard>01</shard>
+        <!--highlight-next-line-->
+        <replica>01</replica>
+        <cluster>cluster_1S_2R</cluster>
+    </macros>
+</clickhouse>
+```
+
+| Directory | File |
+|-----------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `fs/volumes/clickhouse-01/etc/clickhouse-server/config.d` | [`config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-01/etc/clickhouse-server/config.d/config.xml) |
+| `fs/volumes/clickhouse-02/etc/clickhouse-server/config.d` | [`config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-02/etc/clickhouse-server/config.d/config.xml) |
+
+Each section of the above configuration file is explained in more detail below.
+
+#### Networking and logging {#networking}
+
+<ListenHost />
+
+Logging is defined in the `<logger>` block. This example configuration gives
+you a debug log that will roll over at 1000M three times:
+
+```xml
+<logger>
+    <level>debug</level>
+    <log>/var/log/clickhouse-server/clickhouse-server.log</log>
+    <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
+    <size>1000M</size>
+    <count>3</count>
+</logger>
+```
+
+For more information on logging configuration, see the comments included in the
+default ClickHouse [configuration file](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml).
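+
+Once the containers are running (see [Test the setup](#test-the-setup) below), you can
+confirm that these paths are being written to by tailing the log inside a container. This
+is just a convenience check and assumes the default paths configured above:
+
+```bash
+# Show the last few lines of the server log on clickhouse-01
+docker exec clickhouse-01 tail -n 5 /var/log/clickhouse-server/clickhouse-server.log
+```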
+
+#### Cluster configuration {#cluster-configuration}
+
+Configuration for the cluster is set up in the `<remote_servers>` block.
+Here the cluster name `cluster_1S_2R` is defined.
+
+The `<cluster_1S_2R>` block defines the layout of the cluster,
+using the `<shard>` and `<replica>` settings, and acts as a
+template for distributed DDL queries, which are queries that execute across the
+cluster using the `ON CLUSTER` clause. By default, distributed DDL queries
+are allowed, but can also be turned off with the `allow_distributed_ddl_queries` setting.
+
+`internal_replication` is set to `true` so that data is written to just one of the replicas.
+
+```xml
+<remote_servers>
+    <cluster_1S_2R>
+        <shard>
+            <internal_replication>true</internal_replication>
+            <replica>
+                <host>clickhouse-01</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>clickhouse-02</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+    </cluster_1S_2R>
+</remote_servers>
+```
+
+
+
+#### Keeper configuration {#keeper-config-explanation}
+
+The `<zookeeper>` section tells ClickHouse where ClickHouse Keeper (or ZooKeeper) is running.
+As we are using a ClickHouse Keeper cluster, each `<node>` of the cluster needs to be specified,
+along with its hostname and port number using the `<host>` and `<port>` tags respectively.
+
+Setup of ClickHouse Keeper is explained in the next step of the tutorial.
+
+```xml
+<zookeeper>
+    <node>
+        <host>clickhouse-keeper-01</host>
+        <port>9181</port>
+    </node>
+    <node>
+        <host>clickhouse-keeper-02</host>
+        <port>9181</port>
+    </node>
+    <node>
+        <host>clickhouse-keeper-03</host>
+        <port>9181</port>
+    </node>
+</zookeeper>
+```
+
+:::note
+Although it is possible to run ClickHouse Keeper on the same server as ClickHouse Server,
+in production environments we strongly recommend that ClickHouse Keeper runs on dedicated hosts.
+:::
+
+#### Macros configuration {#macros-config-explanation}
+
+Additionally, the `<macros>` section is used to define parameter substitutions for
+replicated tables. These are listed in `system.macros` and allow using substitutions
+like `{shard}` and `{replica}` in queries.
+
+```xml
+<macros>
+    <shard>01</shard>
+    <replica>01</replica>
+    <cluster>cluster_1S_2R</cluster>
+</macros>
+```
+
+:::note
+These will be defined uniquely depending on the layout of the cluster.
+:::
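+
+Once the cluster is up, you can check which values each node has picked up by querying the
+`system.macros` table from that node's client. The exact rows you see depend on the macros
+you defined above:
+
+```sql
+-- Show the macro substitutions known to this node
+SELECT macro, substitution FROM system.macros;
+```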
+
+### User configuration {#user-config}
+
+Now modify each empty configuration file `users.xml` located at
+`fs/volumes/clickhouse-{}/etc/clickhouse-server/users.d` with the following:
+
+```xml title="/users.d/users.xml"
+<?xml version="1.0"?>
+<clickhouse>
+    <profiles>
+        <default>
+            <max_memory_usage>10000000000</max_memory_usage>
+            <use_uncompressed_cache>0</use_uncompressed_cache>
+            <load_balancing>in_order</load_balancing>
+            <log_queries>1</log_queries>
+        </default>
+    </profiles>
+    <users>
+        <default>
+            <access_management>1</access_management>
+            <profile>default</profile>
+            <networks>
+                <ip>::/0</ip>
+            </networks>
+            <quota>default</quota>
+            <access_management>1</access_management>
+            <named_collection_control>1</named_collection_control>
+            <show_named_collections>1</show_named_collections>
+            <show_named_collections_secrets>1</show_named_collections_secrets>
+        </default>
+    </users>
+    <quotas>
+        <default>
+            <interval>
+                <duration>3600</duration>
+                <queries>0</queries>
+                <errors>0</errors>
+                <result_rows>0</result_rows>
+                <read_rows>0</read_rows>
+                <execution_time>0</execution_time>
+            </interval>
+        </default>
+    </quotas>
+</clickhouse>
+```
+
+| Directory | File |
+|-----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `fs/volumes/clickhouse-01/etc/clickhouse-server/users.d` | [`users.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-01/etc/clickhouse-server/users.d/users.xml) |
+| `fs/volumes/clickhouse-02/etc/clickhouse-server/users.d` | [`users.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-02/etc/clickhouse-server/users.d/users.xml) |
+
+In this example, the default user is configured without a password for simplicity.
+In practice, this is discouraged.
+
+:::note
+In this example, each `users.xml` file is identical for all nodes in the cluster.
+:::
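+
+Because `access_management` is enabled for the `default` user, you can later create
+password-protected users with SQL once the cluster is running, instead of adding them to
+`users.xml`. The statement below is only an illustration with a made-up username and password;
+adapt it to your own needs:
+
+```sql
+-- Create an additional user on every node of the cluster in one statement
+CREATE USER IF NOT EXISTS admin_user ON CLUSTER cluster_1S_2R IDENTIFIED BY 'change_me';
+```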
+
+## Configure ClickHouse Keeper {#configure-clickhouse-keeper-nodes}
+
+### Keeper setup {#configuration-explanation}
+
+<KeeperConfig />
+
+| Directory | File |
+|------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `fs/volumes/clickhouse-keeper-01/etc/clickhouse-server/config.d` | [`keeper_config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-keeper-01/etc/clickhouse-keeper/keeper_config.xml) |
+| `fs/volumes/clickhouse-keeper-02/etc/clickhouse-server/config.d` | [`keeper_config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-keeper-02/etc/clickhouse-keeper/keeper_config.xml) |
+| `fs/volumes/clickhouse-keeper-03/etc/clickhouse-server/config.d` | [`keeper_config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-keeper-03/etc/clickhouse-keeper/keeper_config.xml) |
+
+<KeeperConfigExplanation />
+
+## Test the setup {#test-the-setup}
+
+Make sure that docker is running on your machine.
+Start the cluster using the `docker-compose up` command from the root of the `cluster_1S_2R` directory:
+
+```bash
+docker-compose up -d
+```
+
+You should see docker begin to pull the ClickHouse and Keeper images,
+and then start the containers:
+
+```bash
+[+] Running 6/6
+ ✔ Network cluster_1s_2r_default Created
+ ✔ Container clickhouse-keeper-03 Started
+ ✔ Container clickhouse-keeper-02 Started
+ ✔ Container clickhouse-keeper-01 Started
+ ✔ Container clickhouse-01 Started
+ ✔ Container clickhouse-02 Started
+```
+
+To verify that the cluster is running, connect to either `clickhouse-01` or `clickhouse-02` and run the
+following query. The command to connect to the first node is shown:
+
+```bash
+# Connect to any node
+docker exec -it clickhouse-01 clickhouse-client
+```
+
+If successful, you will see the ClickHouse client prompt:
+
+```response
+cluster_1S_2R node 1 :)
+```
+
+Run the following query to check what cluster topologies are defined for which
+hosts:
+
+```sql title="Query"
+SELECT
+ cluster,
+ shard_num,
+ replica_num,
+ host_name,
+ port
+FROM system.clusters;
+```
+
+```response title="Response"
+ ┌─cluster───────┬─shard_num─┬─replica_num─┬─host_name─────┬─port─┐
+1. │ cluster_1S_2R │ 1 │ 1 │ clickhouse-01 │ 9000 │
+2. │ cluster_1S_2R │ 1 │ 2 │ clickhouse-02 │ 9000 │
+3. │ default │ 1 │ 1 │ localhost │ 9000 │
+ └───────────────┴───────────┴─────────────┴───────────────┴──────┘
+```
+
+Run the following query to check the status of the ClickHouse Keeper cluster:
+
+```sql title="Query"
+SELECT *
+FROM system.zookeeper
+WHERE path IN ('/', '/clickhouse')
+```
+
+```response title="Response"
+ ┌─name───────┬─value─┬─path────────┐
+1. │ sessions │ │ /clickhouse │
+2. │ task_queue │ │ /clickhouse │
+3. │ keeper │ │ / │
+4. │ clickhouse │ │ / │
+ └────────────┴───────┴─────────────┘
+```
+
+<VerifyKeeperStatus />
+
+With this, you have successfully set up a ClickHouse cluster with a single shard and two replicas.
+In the next step, you will create a table in the cluster.
+
+## Create a database {#creating-a-database}
+
+Now that you have verified that the cluster is correctly set up and running, you
+will recreate the same table as the one used in the [UK property prices](/getting-started/example-datasets/uk-price-paid)
+example dataset tutorial. It consists of around 30 million rows of prices paid
+for real-estate property in England and Wales since 1995.
+
+Connect to the client of each host by running each of the following commands from separate terminal
+tabs or windows:
+
+```bash
+docker exec -it clickhouse-01 clickhouse-client
+docker exec -it clickhouse-02 clickhouse-client
+```
+
+You can run the query below from the clickhouse-client of each host to confirm that
+no databases have been created yet, apart from the default ones:
+
+```sql title="Query"
+SHOW DATABASES;
+```
+
+```response title="Response"
+ ┌─name───────────────┐
+1. │ INFORMATION_SCHEMA │
+2. │ default │
+3. │ information_schema │
+4. │ system │
+ └────────────────────┘
+```
+
+From the `clickhouse-01` client run the following **distributed** DDL query using the
+`ON CLUSTER` clause to create a new database called `uk`:
+
+```sql
+CREATE DATABASE IF NOT EXISTS uk
+-- highlight-next-line
+ON CLUSTER cluster_1S_2R;
+```
+
+You can again run the same query as before from the client of each host
+to confirm that the database has been created across the cluster, despite running
+the query only on `clickhouse-01`:
+
+```sql
+SHOW DATABASES;
+```
+
+```response
+ ┌─name───────────────┐
+1. │ INFORMATION_SCHEMA │
+2. │ default │
+3. │ information_schema │
+4. │ system │
+# highlight-next-line
+5. │ uk │
+ └────────────────────┘
+```
+
+## Create a table on the cluster {#creating-a-table}
+
+Now that the database has been created, create a replicated table on the cluster.
+Run the following query from either of the host clients:
+
+```sql
+CREATE TABLE IF NOT EXISTS uk.uk_price_paid
+--highlight-next-line
+ON CLUSTER cluster_1S_2R
+(
+ price UInt32,
+ date Date,
+ postcode1 LowCardinality(String),
+ postcode2 LowCardinality(String),
+ type Enum8('terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4, 'other' = 0),
+ is_new UInt8,
+ duration Enum8('freehold' = 1, 'leasehold' = 2, 'unknown' = 0),
+ addr1 String,
+ addr2 String,
+ street LowCardinality(String),
+ locality LowCardinality(String),
+ town LowCardinality(String),
+ district LowCardinality(String),
+ county LowCardinality(String)
+)
+--highlight-next-line
+ENGINE = ReplicatedMergeTree
+ORDER BY (postcode1, postcode2, addr1, addr2);
+```
+
+Notice that it is identical to the query used in the original `CREATE` statement of the
+[UK property prices](/getting-started/example-datasets/uk-price-paid) example dataset tutorial,
+except for the `ON CLUSTER` clause and use of the `ReplicatedMergeTree` engine.
+
+The `ON CLUSTER` clause is designed for distributed execution of DDL (Data Definition Language)
+queries such as `CREATE`, `DROP`, `ALTER`, and `RENAME`, ensuring that these
+schema changes are applied across all nodes in a cluster.
+
+The [`ReplicatedMergeTree`](https://clickhouse.com/docs/engines/table-engines/mergetree-family/replication#converting-from-mergetree-to-replicatedmergetree)
+engine works just as the ordinary `MergeTree` table engine, but it will also replicate the data.
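+
+After the table has been created, you can inspect the state of replication for it through the
+`system.replicas` table on either node. This is optional, but it is a useful way to see that
+both replicas are registered and active:
+
+```sql
+-- Check replication health for the new table
+SELECT database, table, is_leader, total_replicas, active_replicas
+FROM system.replicas
+WHERE table = 'uk_price_paid';
+```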
+
+You can run the query below from either `clickhouse-01` or `clickhouse-02` client
+to confirm that the table has been created across the cluster:
+
+```sql title="Query"
+SHOW TABLES IN uk;
+```
+
+```response title="Response"
+   ┌─name──────────┐
+1. │ uk_price_paid │
+   └───────────────┘
+```
+
+## Insert data {#inserting-data}
+
+Now insert data from `clickhouse-01`:
+
+```sql
+INSERT INTO uk.uk_price_paid
+SELECT
+ toUInt32(price_string) AS price,
+ parseDateTimeBestEffortUS(time) AS date,
+ splitByChar(' ', postcode)[1] AS postcode1,
+ splitByChar(' ', postcode)[2] AS postcode2,
+ transform(a, ['T', 'S', 'D', 'F', 'O'], ['terraced', 'semi-detached', 'detached', 'flat', 'other']) AS type,
+ b = 'Y' AS is_new,
+ transform(c, ['F', 'L', 'U'], ['freehold', 'leasehold', 'unknown']) AS duration,
+ addr1,
+ addr2,
+ street,
+ locality,
+ town,
+ district,
+ county
+FROM url(
+ 'http://prod1.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-complete.csv',
+ 'CSV',
+ 'uuid_string String,
+ price_string String,
+ time String,
+ postcode String,
+ a String,
+ b String,
+ c String,
+ addr1 String,
+ addr2 String,
+ street String,
+ locality String,
+ town String,
+ district String,
+ county String,
+ d String,
+ e String'
+) SETTINGS max_http_get_redirects=10;
+```
+
+Query the table from `clickhouse-02` or `clickhouse-01`:
+
+```sql title="Query"
+SELECT count(*) FROM uk.uk_price_paid;
+```
+
+```response title="Response"
+ ┌──count()─┐
+1. │ 30212555 │ -- 30.21 million
+ └──────────┘
+```
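+
+If you want to confirm that each replica holds its own full copy of the data, you can also run
+the count against every replica at once with the `clusterAllReplicas` table function, which
+should return one row per host with the same count on each:
+
+```sql
+-- Count rows on every replica of the cluster individually
+SELECT hostName() AS host, count() AS rows
+FROM clusterAllReplicas('cluster_1S_2R', uk.uk_price_paid)
+GROUP BY host;
+```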
+
+To demonstrate what happens when one of the hosts fails, create a simple test database
+and test table from either of the hosts:
+
+```sql
+CREATE DATABASE IF NOT EXISTS test ON CLUSTER cluster_1S_2R;
+CREATE TABLE test.test_table ON CLUSTER cluster_1S_2R
+(
+ `id` UInt64,
+ `name` String
+)
+ENGINE = ReplicatedMergeTree
+ORDER BY id;
+```
+
+As with the `uk_price_paid` table, we can insert data from either host:
+
+```sql
+INSERT INTO test.test_table (id, name) VALUES (1, 'Clicky McClickface');
+```
+
+But what will happen if one of the hosts is down? To simulate this, stop
+`clickhouse-01` by running:
+
+```bash
+docker stop clickhouse-01
+```
+
+Check that the host is down by running:
+
+```bash
+docker-compose ps
+```
+
+```response title="Response"
+NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+clickhouse-02 clickhouse/clickhouse-server:latest "/entrypoint.sh" clickhouse-02 X minutes ago Up X minutes 127.0.0.1:8124->8123/tcp, 127.0.0.1:9001->9000/tcp
+clickhouse-keeper-01 clickhouse/clickhouse-keeper:latest-alpine "/entrypoint.sh" clickhouse-keeper-01 X minutes ago Up X minutes 127.0.0.1:9181->9181/tcp
+clickhouse-keeper-02 clickhouse/clickhouse-keeper:latest-alpine "/entrypoint.sh" clickhouse-keeper-02 X minutes ago Up X minutes 127.0.0.1:9182->9181/tcp
+clickhouse-keeper-03 clickhouse/clickhouse-keeper:latest-alpine "/entrypoint.sh" clickhouse-keeper-03 X minutes ago Up X minutes 127.0.0.1:9183->9181/tcp
+```
+
+With `clickhouse-01` now down, insert another row of data into the test table
+from the `clickhouse-02` client and query the table:
+
+```sql
+INSERT INTO test.test_table (id, name) VALUES (2, 'Alexey Milovidov');
+SELECT * FROM test.test_table;
+```
+
+```response title="Response"
+ ┌─id─┬─name───────────────┐
+1. │ 1 │ Clicky McClickface │
+2. │ 2 │ Alexey Milovidov │
+ └────┴────────────────────┘
+```
+
+Now restart `clickhouse-01` with the following command (you can run `docker-compose ps` again afterwards to confirm that it is back up):
+
+```bash
+docker start clickhouse-01
+```
+
+Query the test table again from `clickhouse-01` after running `docker exec -it clickhouse-01 clickhouse-client`:
+
+```sql title="Query"
+SELECT * FROM test.test_table
+```
+
+```response title="Response"
+ ┌─id─┬─name───────────────┐
+1. │ 1 │ Clicky McClickface │
+2. │ 2 │ Alexey Milovidov │
+ └────┴────────────────────┘
+```
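+
+If you want to be explicit about waiting for the restarted replica to catch up, rather than
+relying on background replication, you can run the following statement on the restarted node
+before querying it. It blocks until the replica has processed its replication queue:
+
+```sql
+-- Wait until this replica has fetched everything it missed while it was down
+SYSTEM SYNC REPLICA test.test_table;
+```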
+
+
+
+## Conclusion {#conclusion}
+
+As you saw, the advantage of this cluster topology is that with two replicas,
+your data exists on two separate hosts. If one host fails, the other replica
+continues serving data without any loss. This eliminates single points of
+failure at the storage level.
+
+When one host goes down, the remaining replica is still able to:
+- Handle read queries without interruption
+- Accept new writes (depending on your consistency settings)
+- Maintain service availability for applications
+
+When the failed host comes back online, it is able to:
+- Automatically sync missing data from the healthy replica
+- Resume normal operations without manual intervention
+- Restore full redundancy quickly
+
+In the next example, we'll look at how to set up a cluster with two shards but
+only one replica.
diff --git a/docs/deployment-guides/replication-sharding-examples/02_2_shards_1_replica.md b/docs/deployment-guides/replication-sharding-examples/02_2_shards_1_replica.md
new file mode 100644
index 00000000000..48b023b0b45
--- /dev/null
+++ b/docs/deployment-guides/replication-sharding-examples/02_2_shards_1_replica.md
@@ -0,0 +1,212 @@
+---
+slug: /architecture/horizontal-scaling
+sidebar_label: 'Scaling'
+sidebar_position: 10
+title: 'Scaling'
+description: 'Page describing an example architecture designed to provide scalability'
+---
+
+import Image from '@theme/IdealImage';
+import ReplicationShardingTerminology from '@site/docs/_snippets/_replication-sharding-terminology.md';
+import ConfigFileNote from '@site/docs/_snippets/_config-files.md';
+import scalingOut1 from '@site/static/images/deployment-guides/scaling-out-1.png';
+import DedicatedKeeperServers from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_dedicated_keeper_servers.mdx';
+
+> In this example, you'll learn how to set up a simple ClickHouse cluster which
+scales horizontally. It includes three nodes: two combined ClickHouse plus coordination (ClickHouse Keeper) servers,
+and a third server with only ClickHouse Keeper to complete the quorum of three.
+With this example, we'll create a database, a table, and a distributed table that
+will be able to query the data on both of the nodes.
+
+The architecture of the cluster you will be setting up is shown below:
+
+
+
+
+
+## Prerequisites {#pre-requisites}
+
+- You've set up a [local ClickHouse server](../getting-started/install/install.mdx) before
+- You are familiar with basic configuration concepts of ClickHouse such as [configuration files](/operations/configuration-files)
+- You have docker installed on your machine
+
+
+
+## Set up directory structure and test environment {#set-up}
+
+In this tutorial, you will use [Docker compose](https://docs.docker.com/compose/) to
+set up the ClickHouse cluster. This setup could be modified to work
+for separate local machines, virtual machines or cloud instances as well.
+
+Run the following commands to set up the directory structure for this example:
+
+```bash
+mkdir cluster_2S_1R
+cd cluster_2S_1R
+
+# Create clickhouse-keeper directories
+for i in {01..03}; do
+ mkdir -p fs/volumes/clickhouse-keeper-${i}/etc/clickhouse-keeper
+done
+
+# Create clickhouse-server directories
+for i in {01..02}; do
+ mkdir -p fs/volumes/clickhouse-${i}/etc/clickhouse-server
+done
+```
+
+Add the following `docker-compose.yml` file to the `cluster_2S_1R` directory:
+
+```yaml title="docker-compose.yml"
+
+```
+
+Create the following sub-directories and files:
+
+```bash
+for i in {01..02}; do
+ mkdir -p fs/volumes/clickhouse-${i}/etc/clickhouse-server/config.d
+ mkdir -p fs/volumes/clickhouse-${i}/etc/clickhouse-server/users.d
+ touch fs/volumes/clickhouse-${i}/etc/clickhouse-server/config.d/config.xml
+ touch fs/volumes/clickhouse-${i}/etc/clickhouse-server/users.d/users.xml
+done
+```
+
+
+
+## Configure ClickHouse nodes {#configure-clickhouse-servers}
+
+### Server setup {#cluster-configuration}
+
+Now modify each empty configuration file `config.xml` located at
+`fs/volumes/clickhouse-{}/etc/clickhouse-server/config.d`. The lines which are
+highlighted below need to be changed to be specific to each node:
+
+```xml
+
+```
+
+| Directory | File |
+|-----------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `fs/volumes/clickhouse-01/etc/clickhouse-server/config.d` | [`config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-01/etc/clickhouse-server/config.d/config.xml) |
+| `fs/volumes/clickhouse-02/etc/clickhouse-server/config.d` | [`config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-02/etc/clickhouse-server/config.d/config.xml) |
+
+Each section of the above configuration file is explained in more detail below.
+
+#### Networking and logging {#networking}
+
+
+
+Logging is defined in the `<logger>` block. This example configuration gives
+you a debug log that will roll over at 1000M three times:
+
+```xml
+
+```
+
+For more information on logging configuration, see the comments included in the
+default ClickHouse [configuration file](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml).
+
+#### Cluster configuration {#cluster-configuration}
+
+Configuration for the cluster is set up in the `<remote_servers>` block.
+Here the cluster name `cluster_2S_1R` is defined.
+
+The `<cluster_2S_1R>` block defines the layout of the cluster,
+using the `<shard>` and `<replica>` settings, and acts as a
+template for distributed DDL queries, which are queries that execute across the
+cluster using the `ON CLUSTER` clause. By default, distributed DDL queries
+are allowed, but can also be turned off with the `allow_distributed_ddl_queries` setting.
+
+`internal_replication` is set to `true` so that data is written to just one of the replicas.
+
+```xml
+
+```
+
+
+
+#### Keeper configuration {#keeper-config-explanation}
+
+The `<zookeeper>` section tells ClickHouse where ClickHouse Keeper (or ZooKeeper) is running.
+As we are using a ClickHouse Keeper cluster, each `<node>` of the cluster needs to be specified,
+along with its hostname and port number using the `<host>` and `<port>` tags respectively.
+
+Setup of ClickHouse Keeper is explained in the next step of the tutorial.
+
+```xml
+
+```
+
+:::note
+Although it is possible to run ClickHouse Keeper on the same server as ClickHouse Server,
+in production environments we strongly recommend that ClickHouse Keeper runs on dedicated hosts.
+:::
+
+#### Macros configuration {#macros-config-explanation}
+
+Additionally, the `<macros>` section is used to define parameter substitutions for
+replicated tables. These are listed in `system.macros` and allow using substitutions
+like `{shard}` and `{replica}` in queries.
+
+```xml
+
+```
+
+:::note
+These will be defined uniquely depending on the layout of the cluster.
+:::
+
+### User configuration {#user-config}
+
+Now modify each empty configuration file `users.xml` located at
+`fs/volumes/clickhouse-{}/etc/clickhouse-server/users.d` with the following:
+
+```xml title="/users.d/users.xml"
+
+```
+
+| Directory | File |
+|-----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `fs/volumes/clickhouse-01/etc/clickhouse-server/users.d` | [`users.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-01/etc/clickhouse-server/users.d/users.xml) |
+| `fs/volumes/clickhouse-02/etc/clickhouse-server/users.d` | [`users.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-02/etc/clickhouse-server/users.d/users.xml) |
+
+In this example, the default user is configured without a password for simplicity.
+In practice, this is discouraged.
+
+:::note
+In this example, each `users.xml` file is identical for all nodes in the cluster.
+:::
+
+## Configure ClickHouse Keeper {#configure-clickhouse-keeper-nodes}
+
+### Keeper setup {#configuration-explanation}
+
+
+
+| Directory | File |
+|------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `fs/volumes/clickhouse-keeper-01/etc/clickhouse-server/config.d` | [`keeper_config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-keeper-01/etc/clickhouse-keeper/keeper_config.xml) |
+| `fs/volumes/clickhouse-keeper-02/etc/clickhouse-server/config.d` | [`keeper_config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-keeper-02/etc/clickhouse-keeper/keeper_config.xml) |
+| `fs/volumes/clickhouse-keeper-03/etc/clickhouse-server/config.d` | [`keeper_config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_1S_2R/fs/volumes/clickhouse-keeper-03/etc/clickhouse-keeper/keeper_config.xml) |
+
+
+
+## Test the setup {#test-the-setup}
+
+
+## Create a database {#creating-a-database}
+
+
+## Create a table on the cluster {#creating-a-table}
+
+
+## Insert data {#inserting-data}
+
+
+
+
+## Conclusion {#conclusion}
+
+
+
diff --git a/docs/architecture/cluster-deployment.md b/docs/deployment-guides/replication-sharding-examples/03_2_shards_2_replicas.md
similarity index 70%
rename from docs/architecture/cluster-deployment.md
rename to docs/deployment-guides/replication-sharding-examples/03_2_shards_2_replicas.md
index 66c71d9afb0..ec542342804 100644
--- a/docs/architecture/cluster-deployment.md
+++ b/docs/deployment-guides/replication-sharding-examples/03_2_shards_2_replicas.md
@@ -1,33 +1,48 @@
---
slug: /architecture/cluster-deployment
-sidebar_label: 'Cluster Deployment'
+sidebar_label: 'Replication + Scaling'
sidebar_position: 100
-title: 'Cluster Deployment'
+title: 'Replication + Scaling'
description: 'By going through this tutorial, you will learn how to set up a simple ClickHouse cluster.'
---
-> In this tutorial, you'll learn how to set up a simple ClickHouse cluster
-consisting of two shards and two replicas with a 3-node ClickHouse Keeper cluster
-for managing coordination and keeping quorum in the cluster.
+import ConfigExplanation from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_config_explanation.mdx';
+import ListenHost from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_listen_host.mdx';
+import KeeperConfig from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_keeper_config.mdx';
+import KeeperConfigExplanation from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_keeper_explanation.mdx';
+import VerifyKeeperStatus from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_verify_keeper_using_mntr.mdx';
+import DedicatedKeeperServers from '@site/docs/deployment-guides/replication-sharding-examples/_snippets/_dedicated_keeper_servers.mdx';
+
+> In this example, you'll learn how to set up a simple ClickHouse cluster which
+both replicates and scales. It consists of two shards and two replicas, with a
+3-node ClickHouse Keeper cluster for managing coordination and keeping quorum
+in the cluster.
+
+The architecture of the cluster you will be setting up is shown below:
+
+|Node|Description|
+|----|-----------|
+|`clickhouse-01`|Data: shard 1, replica 1|
+|`clickhouse-02`|Data: shard 2, replica 1|
+|`clickhouse-03`|Data: shard 1, replica 2|
+|`clickhouse-04`|Data: shard 2, replica 2|
+|`clickhouse-keeper-01`|ClickHouse Keeper|
+|`clickhouse-keeper-02`|ClickHouse Keeper|
+|`clickhouse-keeper-03`|ClickHouse Keeper|
+
+
## Prerequisites {#prerequisites}
-- You've already set up a [local ClickHouse server](../getting-started/install/install.mdx)
+- You've set up a [local ClickHouse server](../getting-started/install/install.mdx) before
- You are familiar with basic configuration concepts of ClickHouse such as [configuration files](/operations/configuration-files)
- You have docker installed on your machine
-The architecture of the cluster we will be setting up is shown below:
-
-
## Set up directory structure and test environment {#set-up}
In this tutorial, you will use [Docker compose](https://docs.docker.com/compose/) to
-set up the ClickHouse cluster for simplicity. This setup could be modified to work
+set up the ClickHouse cluster. This setup could be modified to work
for separate local machines, virtual machines or cloud instances as well.
-Run the following commands to set up the directory structure for the cluster:
+Run the following commands to set up the directory structure for this example:
```bash
mkdir clickhouse-cluster
@@ -149,35 +164,12 @@ for i in {01..04}; do
done
```
-- The `config.d` directory contains ClickHouse server configuration file `config.xml`,
-in which custom configuration for each ClickHouse node is defined. This
-configuration gets combined with the default `config.xml` ClickHouse configuration
-file that comes with every ClickHouse installation.
-- The `users.d` directory contains user configuration file `users.xml`, in which
-custom configuration for users is defined. This configuration gets combined with
-the default ClickHouse `users.xml` configuration file that comes with every
-ClickHouse installation.
-
-
-
-:::tip Custom configuration directories
-It is a best practice to make use of the `config.d` and `users.d` directories when
-writing your own configuration, rather than directly modifying the default configuration
-in `/etc/clickhouse-server/config.xml` and `etc/clickhouse-server/users.xml`.
-
-The line
-
-```xml
-
-```
-
-Ensures that the configuration sections defined in the `config.d` and `users.d`
-directories override the default configuration sections defined in the default
-`config.xml` and `users.xml` files.
-:::
+
## Configure ClickHouse nodes {#configure-clickhouse-servers}
+### Server configuration {#cluster-configuration}
+
Now modify each empty configuration file `config.xml` located at
`fs/volumes/clickhouse-{}/etc/clickhouse-server/config.d`. The lines which are
highlighted below need to be changed to be specific to each node:
@@ -247,10 +239,12 @@ highlighted below need to be changed to be specific to each node:
9181
+
01
01
+
```
@@ -260,58 +254,85 @@ highlighted below need to be changed to be specific to each node:
| `fs/volumes/clickhouse-02/etc/clickhouse-server/config.d` | [`config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_2S_2R/fs/volumes/clickhouse-02/etc/clickhouse-server/config.d/config.xml) |
| `fs/volumes/clickhouse-03/etc/clickhouse-server/config.d` | [`config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_2S_2R/fs/volumes/clickhouse-03/etc/clickhouse-server/config.d/config.xml) |
| `fs/volumes/clickhouse-04/etc/clickhouse-server/config.d` | [`config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_2S_2R/fs/volumes/clickhouse-04/etc/clickhouse-server/config.d/config.xml) |
-
-### Configuration explanation {#configuration-explanation-clickhouse}
-External communication to the network interface is enabled by activating the listen
-host setting. This ensures that the ClickHouse server host is reachable by other
-hosts.
+Each section of the above configuration file is explained in more detail below.
+
+#### Networking and logging {#networking}
+
+
+
+Logging configuration is defined in the `<logger>` block. This example configuration gives
+you a debug log that will roll over at 1000M three times:
```xml
-0.0.0.0
+
+ debug
+ /var/log/clickhouse-server/clickhouse-server.log
+ /var/log/clickhouse-server/clickhouse-server.err.log
+ 1000M
+ 3
+
```
-Note that each node in the cluster gets the same cluster configuration defined by the
-`` section:
+For more information on logging configuration, see the comments included in the
+default ClickHouse [configuration file](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml).
+
+#### Cluster configuration {#cluster-config}
+
+Configuration for the cluster is set up in the `<remote_servers>` block.
+Here the cluster name `cluster_2S_2R` is defined.
+
+The `<cluster_2S_2R>` block defines the layout of the cluster,
+using the `<shard>` and `<replica>` settings, and acts as a
+template for distributed DDL queries, which are queries that execute across the
+cluster using the `ON CLUSTER` clause. By default, distributed DDL queries
+are allowed, but can also be turned off with the `allow_distributed_ddl_queries` setting.
+
+`internal_replication` is set to true so that data is written to just one of the replicas.
```xml
-
-
- true
-
- clickhouse-01
- 9000
-
-
- clickhouse-03
- 9000
-
-
-
- true
-
- clickhouse-02
- 9000
-
-
- clickhouse-04
- 9000
-
-
-
-
+
+
+
+
+
+ true
+
+ clickhouse-01
+ 9000
+
+
+ clickhouse-03
+ 9000
+
+
+
+ true
+
+ clickhouse-02
+ 9000
+
+
+ clickhouse-04
+ 9000
+
+
+
+
```
The `` section defines the layout of the cluster,
and acts as a template for distributed DDL queries, which are queries that execute
across the cluster using the `ON CLUSTER` clause.
+#### Keeper configuration {#keeper-config-explanation}
+
The `` section tells ClickHouse where ClickHouse Keeper (or ZooKeeper) is running.
As we are using a ClickHouse Keeper cluster, each `` of the cluster needs to be specified,
-along with it's hostname and port number using the `` and `` tags respectively.
+along with its hostname and port number using the `<host>` and `<port>` tags respectively.
-Set-up of ClickHouse Keeper is explained in the next step of the tutorial.
+Setup of ClickHouse Keeper is explained in the next step of the tutorial.
```xml
@@ -330,6 +351,13 @@ Set-up of ClickHouse Keeper is explained in the next step of the tutorial.
```
+:::note
+Although it is possible to run ClickHouse Keeper on the same server as ClickHouse Server,
+in production environments we strongly recommend that ClickHouse Keeper runs on dedicated hosts.
+:::
+
+#### Macros configuration {#macros-config-explanation}
+
Additionally, the `` section is used to define parameter substitutions for
replicated tables. These are listed in `system.macros` and allow using substitutions
like `{shard}` and `{replica}` in queries.
@@ -341,6 +369,8 @@ like `{shard}` and `{replica}` in queries.
```
+### User configuration {#cluster-configuration}
+
Now modify each empty configuration file `users.xml` located at
`fs/volumes/clickhouse-{}/etc/clickhouse-server/users.d` with the following:
@@ -384,127 +414,39 @@ Now modify each empty configuration file `users.xml` located at
```
-:::note
-Each `users.xml` file is identical for all nodes in the cluster.
-:::
-
-## Configure ClickHouse Keeper nodes {#configure-clickhouse-keeper-nodes}
-
-In order for replication to work, a ClickHouse keeper cluster needs to be set up and
-configured. ClickHouse Keeper provides the coordination system for data replication,
-acting as a stand in replacement for Zookeeper, which could also be used.
-ClickHouse Keeper is however recommended, as it provides better guarantees and
-reliability and uses fewer resources than ZooKeeper. For high availability and in
-order to keep quorum it is recommended to run at least 3 ClickHouse Keeper nodes.
+In this example, the default user is configured without a password for simplicity.
+In practice, this is discouraged.
:::note
-ClickHouse Keeper can run on any node of the cluster alongside ClickHouse, although
-it is recommended to have it run on a dedicated node which allows to scale and
-manage the ClickHouse Keeper cluster independently from the database cluster.
+In this example, each `users.xml` file is identical for all nodes in the cluster.
:::
-Create the `keeper_config.xml` files for each ClickHouse Keeper node
-using the following command:
+## Configure ClickHouse Keeper {#configure-clickhouse-keeper-nodes}
-```bash
-for i in {01..03}; do
- touch fs/volumes/clickhouse-keeper-${i}/etc/clickhouse-keeper/keeper_config.xml
-done
-```
+Next you will configure ClickHouse Keeper, which is used for coordination.
-Modify these empty configuration files called `keeper_config.xml` in each
-node directory `fs/volumes/clickhouse-keeper-{}/etc/clickhouse-keeper`. The
-highlighted lines below need to be changed to be specific to each node:
+### Keeper setup {#configuration-explanation}
-```xml title="/config.d/config.xml"
-
-
- information
- /var/log/clickhouse-keeper/clickhouse-keeper.log
- /var/log/clickhouse-keeper/clickhouse-keeper.err.log
- 1000M
- 3
-
- 0.0.0.0
-
- 9181
-
- 1
- /var/lib/clickhouse/coordination/log
- /var/lib/clickhouse/coordination/snapshots
-
- 10000
- 30000
- information
-
-
-
- 1
- clickhouse-keeper-01
- 9234
-
-
- 2
- clickhouse-keeper-02
- 9234
-
-
- 3
- clickhouse-keeper-03
- 9234
-
-
-
-
-```
-
-### Configuration explanation {#configuration-explanation}
-
-Each configuration file will contain the following unique configuration (shown below).
-The `server_id` used should be unique for that particular ClickHouse Keeper node
-in the cluster and match the server `` defined in the `` section.
-`tcp_port` is the port used by _clients_ of ClickHouse Keeper.
+
-```xml
-9181
-{id}
-```
+| Directory | File |
+|------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `fs/volumes/clickhouse-keeper-01/etc/clickhouse-server/config.d` | [`keeper_config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_2S_2R/fs/volumes/clickhouse-keeper-01/etc/clickhouse-keeper/keeper_config.xml) |
+| `fs/volumes/clickhouse-keeper-02/etc/clickhouse-server/config.d` | [`keeper_config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_2S_2R/fs/volumes/clickhouse-keeper-02/etc/clickhouse-keeper/keeper_config.xml) |
+| `fs/volumes/clickhouse-keeper-03/etc/clickhouse-server/config.d` | [`keeper_config.xml`](https://github.com/ClickHouse/examples/blob/main/docker-compose-recipes/recipes/cluster_2S_2R/fs/volumes/clickhouse-keeper-03/etc/clickhouse-keeper/keeper_config.xml) |
-The following section is used to configure the servers that participate in the
-quorum for the [raft consensus algorithm](https://en.wikipedia.org/wiki/Raft_(algorithm)):
-
-```xml
-
-
- 1
- clickhouse-keeper-01
-
-
- 9234
-
-
- 2
- clickhouse-keeper-02
- 9234
-
-
- 3
- clickhouse-keeper-03
- 9234
-
-
-```
+
## Test the setup {#test-the-setup}
Make sure that docker is running on your machine.
-Start the cluster using the `docker-compose up` command from the `clickhouse-cluster` directory:
+Start the cluster using the `docker-compose up` command from the root of the `cluster_2S_2R` directory:
```bash
docker-compose up -d
```
-You should see docker begin to pull the ClickHouse and Zookeeper images,
+You should see docker begin to pull the ClickHouse and Keeper images,
and then start the containers:
```bash
@@ -520,9 +462,7 @@ and then start the containers:
```
To verify that the cluster is running, connect to any one of the nodes and run the
-following query.
-For the sake of this example, the command to connect to the
-first node is shown:
+following query. The command to connect to the first node is shown:
```bash
# Connect to any node
@@ -575,17 +515,19 @@ WHERE path IN ('/', '/clickhouse')
└────────────┴───────┴─────────────┘
```
-With this you have successfully set up a ClickHouse cluster with two shards and two replicas.
-In the next step we will create a table in the cluster.
+
+
+With this, you have successfully set up a ClickHouse cluster with two shards and two replicas.
+In the next step, you will create a table in the cluster.
-## Creating a distributed database {#creating-a-table}
+## Create a database {#creating-a-database}
-In this tutorial, you will be recreating the same table as the one used in the
-[UK property prices](/getting-started/example-datasets/uk-price-paid) example dataset tutorial.
-It consists of around 30 million rows of prices paid for real-estate property in England and Wales
-since 1995.
+Now that you have verified that the cluster is correctly set up and running, you
+will recreate the same table as the one used in the [UK property prices](/getting-started/example-datasets/uk-price-paid)
+example dataset tutorial. It consists of around 30 million rows of prices paid
+for real-estate property in England and Wales since 1995.
-Start each client of each host, by running each of the following commands from separate terminal
+Connect to the client of each host by running each of the following commands from separate terminal
tabs or windows:
```bash
@@ -611,8 +553,8 @@ SHOW DATABASES;
└────────────────────┘
```
-From the `clickhouse-01` client run the following **distributed** DDL query using the `ON CLUSTER` clause to create a
-database:
+From the `clickhouse-01` client run the following **distributed** DDL query using the
+`ON CLUSTER` clause to create a new database called `uk`:
```sql
CREATE DATABASE IF NOT EXISTS uk
@@ -639,7 +581,7 @@ SHOW DATABASES;
└────────────────────┘
```
-## Creating local tables on the cluster {#creating-a-table}
+## Create a local table on the cluster {#creating-a-table}
Now that the database has been created, create a distributed table in the cluster.
Run the following query from any of the host clients:
@@ -671,14 +613,15 @@ ORDER BY (postcode1, postcode2, addr1, addr2);
Notice that it is identical to the query used in the original `CREATE` statement of the
[UK property prices](/getting-started/example-datasets/uk-price-paid) example dataset tutorial,
-except for the `ON CLUSTER` clause and the `ReplicatedMergeTree` engine.
+except for the `ON CLUSTER` clause and use of the `ReplicatedMergeTree` engine.
The `ON CLUSTER` clause is designed for distributed execution of DDL (Data Definition Language)
queries such as `CREATE`, `DROP`, `ALTER`, and `RENAME`, ensuring that these
schema changes are applied across all nodes in a cluster.
The [`ReplicatedMergeTree`](https://clickhouse.com/docs/engines/table-engines/mergetree-family/replication#converting-from-mergetree-to-replicatedmergetree)
-engine works just as the ordinary `MergeTree` table engine, but it will also replicate the data. It requires two parameters to be specified:
+engine works just as the ordinary `MergeTree` table engine, but it will also replicate the data.
+It requires two parameters to be specified:
- `zoo_path`: The Keeper/ZooKeeper path to the table's metadata.
- `replica_name`: The table's replica name.
@@ -704,20 +647,20 @@ SHOW TABLES IN uk;
```
```response title="Response"
- ┌─name──────────┐
-1. │ uk_price_paid │
- └───────────────┘
+ ┌─name────────────────┐
+1. │ uk_price_paid_local │
+ └─────────────────────┘
```
## Insert data using a distributed table {#inserting-data-using-distributed}
To insert data into the distributed table, `ON CLUSTER` cannot be used as it does
not apply to DML (Data Manipulation Language) queries such as `INSERT`, `UPDATE`,
-and `DELETE`. In order to insert data, it is necessary to make use of the
+and `DELETE`. To insert data, it is necessary to make use of the
[`Distributed`](/engines/table-engines/special/distributed) table engine.
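+
+The engine's parameters are the cluster name, the target database and table, and an
+optional sharding key that decides which shard receives each inserted row. As a rough
+sketch (the cluster name `cluster_2S_2R` is assumed here; the exact statement used in
+this guide follows below):
+
+```sql
+-- Sketch of the engine clause only: forwards queries to uk.uk_price_paid_local on
+-- every shard of the (assumed) cluster and spreads inserted rows randomly.
+ENGINE = Distributed('cluster_2S_2R', 'uk', 'uk_price_paid_local', rand())
+```
+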
From any of the host clients, run the following query to create a distributed table
-using the existing table we created previously with `ON CLUSTER` and
+using the existing table we created previously with `ON CLUSTER` and the
`ReplicatedMergeTree`:
```sql
@@ -782,10 +725,9 @@ across the nodes of our cluster:
```sql
SELECT count(*)
-FROM uk.uk_price_paid_distributed
-LIMIT 10;
+FROM uk.uk_price_paid_distributed;
-SELECT count(*) FROM uk.uk_price_paid_local LIMIT 10;
+SELECT count(*) FROM uk.uk_price_paid_local;
```
```response
diff --git a/docs/deployment-guides/replication-sharding-examples/_snippets/_config_explanation.mdx b/docs/deployment-guides/replication-sharding-examples/_snippets/_config_explanation.mdx
new file mode 100644
index 00000000000..b2afbc447dc
--- /dev/null
+++ b/docs/deployment-guides/replication-sharding-examples/_snippets/_config_explanation.mdx
@@ -0,0 +1,24 @@
+- The `config.d` directory contains the ClickHouse server configuration file `config.xml`,
+  in which custom configuration for each ClickHouse node is defined. This
+  configuration is combined with the default `config.xml` ClickHouse configuration
+  file that comes with every ClickHouse installation.
+- The `users.d` directory contains the user configuration file `users.xml`, in which
+  custom configuration for users is defined. This configuration is combined with
+  the default ClickHouse `users.xml` configuration file that comes with every
+  ClickHouse installation (an example override is sketched below).
+
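+For example, a password for the `default` user could be set from a small override file
+in `users.d`. This is only an illustrative sketch of the merge mechanism and not
+necessarily one of the files used in this guide:
+
+```xml title="users.d/users.xml"
+<clickhouse>
+    <users>
+        <!-- Only the password of the built-in default user is overridden here;
+             everything else is taken from the default users.xml -->
+        <default>
+            <password>secret</password>
+        </default>
+    </users>
+</clickhouse>
+```
+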
+:::tip Custom configuration directories
+It is best practice to use the `config.d` and `users.d` directories when
+writing your own configuration, rather than directly modifying the default configuration
+in `/etc/clickhouse-server/config.xml` and `/etc/clickhouse-server/users.xml`.
+
+The line
+
+```xml
+<clickhouse replace="true">
+```
+
+ensures that the configuration sections defined in the `config.d` and `users.d`
+directories override the default configuration sections defined in the default
+`config.xml` and `users.xml` files.
+:::
\ No newline at end of file
diff --git a/docs/deployment-guides/replication-sharding-examples/_snippets/_dedicated_keeper_servers.mdx b/docs/deployment-guides/replication-sharding-examples/_snippets/_dedicated_keeper_servers.mdx
new file mode 100644
index 00000000000..0326e23d49c
--- /dev/null
+++ b/docs/deployment-guides/replication-sharding-examples/_snippets/_dedicated_keeper_servers.mdx
@@ -0,0 +1,8 @@
+:::note
+Although it is possible to run ClickHouse Server and ClickHouse Keeper combined on the same server,
+we strongly recommend using *dedicated* hosts for ClickHouse Keeper in production environments,
+which is the approach we will demonstrate in this example.
+
+Keeper servers can be smaller, and 4GB RAM is generally enough for each Keeper server
+until your ClickHouse Servers grow large.
+:::
\ No newline at end of file
diff --git a/docs/deployment-guides/replication-sharding-examples/_snippets/_keeper_config.mdx b/docs/deployment-guides/replication-sharding-examples/_snippets/_keeper_config.mdx
new file mode 100644
index 00000000000..55ecdec7618
--- /dev/null
+++ b/docs/deployment-guides/replication-sharding-examples/_snippets/_keeper_config.mdx
@@ -0,0 +1,67 @@
+For replication to work, a ClickHouse Keeper cluster needs to be set up and
+configured. ClickHouse Keeper provides the coordination system for data replication,
+acting as a stand-in replacement for ZooKeeper, which could also be used.
+ClickHouse Keeper is, however, recommended, as it provides better guarantees and
+reliability and uses fewer resources than ZooKeeper. For high availability and to
+keep quorum, it is recommended to run at least three ClickHouse Keeper nodes.
+
+:::note
+ClickHouse Keeper can run on any node of the cluster alongside ClickHouse, although
+it is recommended to have it run on a dedicated node which allows scaling and
+managing the ClickHouse Keeper cluster independently of the database cluster.
+:::
+
+Create the `keeper_config.xml` files for each ClickHouse Keeper node
+using the following command from the root of the example folder:
+
+```bash
+for i in {01..03}; do
+ touch fs/volumes/clickhouse-keeper-${i}/etc/clickhouse-keeper/keeper_config.xml
+done
+```
+
+Modify the empty configuration files that were created in each
+node directory `fs/volumes/clickhouse-keeper-{}/etc/clickhouse-keeper`. The
+highlighted `server_id` line below needs to be changed to be specific to each node:
+
+```xml title="/config.d/config.xml"
+<clickhouse replace="true">
+    <logger>
+        <level>information</level>
+        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
+        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
+        <size>1000M</size>
+        <count>3</count>
+    </logger>
+    <listen_host>0.0.0.0</listen_host>
+    <keeper_server>
+        <tcp_port>9181</tcp_port>
+        <!-- highlight-next-line -->
+        <server_id>1</server_id>
+        <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
+        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
+        <coordination_settings>
+            <operation_timeout_ms>10000</operation_timeout_ms>
+            <session_timeout_ms>30000</session_timeout_ms>
+            <raft_logs_level>information</raft_logs_level>
+        </coordination_settings>
+        <raft_configuration>
+            <server>
+                <id>1</id>
+                <hostname>clickhouse-keeper-01</hostname>
+                <port>9234</port>
+            </server>
+            <server>
+                <id>2</id>
+                <hostname>clickhouse-keeper-02</hostname>
+                <port>9234</port>
+            </server>
+            <server>
+                <id>3</id>
+                <hostname>clickhouse-keeper-03</hostname>
+                <port>9234</port>
+            </server>
+        </raft_configuration>
+    </keeper_server>
+</clickhouse>
+```
diff --git a/docs/deployment-guides/replication-sharding-examples/_snippets/_keeper_explanation.mdx b/docs/deployment-guides/replication-sharding-examples/_snippets/_keeper_explanation.mdx
new file mode 100644
index 00000000000..230dab5e9ce
--- /dev/null
+++ b/docs/deployment-guides/replication-sharding-examples/_snippets/_keeper_explanation.mdx
@@ -0,0 +1,34 @@
+Each configuration file will contain the following unique configuration.
+The `server_id` used should be unique for that particular ClickHouse Keeper node
+in the cluster and match the server `<id>` defined in the `<raft_configuration>` section.
+`tcp_port` is the port used by _clients_ of ClickHouse Keeper.
+
+```xml
+<tcp_port>9181</tcp_port>
+<server_id>{id}</server_id>
+```
+
+The following section is used to configure the servers that participate in the
+quorum for the [Raft consensus algorithm](https://en.wikipedia.org/wiki/Raft_(algorithm)):
+
+```xml
+<raft_configuration>
+    <server>
+        <id>1</id>
+        <hostname>clickhouse-keeper-01</hostname>
+        <port>9234</port>
+    </server>
+    <server>
+        <id>2</id>
+        <hostname>clickhouse-keeper-02</hostname>
+        <port>9234</port>
+    </server>
+    <server>
+        <id>3</id>
+        <hostname>clickhouse-keeper-03</hostname>
+        <port>9234</port>
+    </server>
+</raft_configuration>
+```
\ No newline at end of file
diff --git a/docs/deployment-guides/replication-sharding-examples/_snippets/_listen_host.mdx b/docs/deployment-guides/replication-sharding-examples/_snippets/_listen_host.mdx
new file mode 100644
index 00000000000..df3c71edf44
--- /dev/null
+++ b/docs/deployment-guides/replication-sharding-examples/_snippets/_listen_host.mdx
@@ -0,0 +1,21 @@
+External communication to the network interface is enabled by activating the listen
+host setting. This ensures that the ClickHouse server host is reachable by other
+hosts:
+
+```xml
+<listen_host>0.0.0.0</listen_host>
+```
+
+The port for the HTTP API is set to `8123`:
+
+```xml
+<http_port>8123</http_port>
+```
+
+The TCP port for interaction over ClickHouse's native protocol, used between
+`clickhouse-client` (and other native ClickHouse tools) and `clickhouse-server`, as well
+as between clickhouse-servers, is set to `9000`:
+
+```xml
+<tcp_port>9000</tcp_port>
+```
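+
+Taken together, these network settings appear in each server's custom configuration
+roughly as follows. This is a sketch showing only the network-related lines; the full
+configuration for each node in this guide also defines the cluster, macros and Keeper
+endpoints:
+
+```xml
+<clickhouse>
+    <!-- accept connections on all network interfaces -->
+    <listen_host>0.0.0.0</listen_host>
+    <!-- HTTP API -->
+    <http_port>8123</http_port>
+    <!-- native protocol -->
+    <tcp_port>9000</tcp_port>
+</clickhouse>
+```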
diff --git a/docs/deployment-guides/replication-sharding-examples/_snippets/_server_parameter_table.mdx b/docs/deployment-guides/replication-sharding-examples/_snippets/_server_parameter_table.mdx
new file mode 100644
index 00000000000..4586778b803
--- /dev/null
+++ b/docs/deployment-guides/replication-sharding-examples/_snippets/_server_parameter_table.mdx
@@ -0,0 +1,6 @@
+For each server, the following parameters are specified (a configuration sketch follows the table):
+
+| Parameter | Description | Default Value |
+|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+| `host` | The address of the remote server. You can use either the domain or the IPv4 or IPv6 address. If you specify the domain, the server makes a DNS request when it starts, and the result is stored as long as the server is running. If the DNS request fails, the server does not start. If you change the DNS record, you need to restart the server. | - |
+| `port` | The TCP port for messenger activity (`tcp_port` in the config, usually set to 9000). Not to be confused with `http_port`. | - |
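+
+For illustration, these are the parameters that each `<replica>` entry of a cluster
+definition is built from. The sketch below uses the host names from this guide, but the
+cluster name `cluster_2S_2R` and the grouping of hosts into shards are assumptions; the
+exact definition used in this example is given in its configuration files:
+
+```xml
+<remote_servers>
+    <cluster_2S_2R>
+        <shard>
+            <replica>
+                <host>clickhouse-01</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>clickhouse-03</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+        <!-- the second shard is defined the same way with the remaining two hosts -->
+    </cluster_2S_2R>
+</remote_servers>
+```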
diff --git a/docs/deployment-guides/replication-sharding-examples/_snippets/_verify_keeper_using_mntr.mdx b/docs/deployment-guides/replication-sharding-examples/_snippets/_verify_keeper_using_mntr.mdx
new file mode 100644
index 00000000000..15f60053792
--- /dev/null
+++ b/docs/deployment-guides/replication-sharding-examples/_snippets/_verify_keeper_using_mntr.mdx
@@ -0,0 +1,70 @@
+The `mntr` command is also commonly used to verify that ClickHouse Keeper is
+running and to get state information about the relationship of the three Keeper nodes.
+In the configuration used in this example, there are three nodes working together.
+The nodes will elect a leader, and the remaining nodes will be followers.
+
+The `mntr` command gives information related to performance, and whether a particular
+node is a follower or a leader.
+
+:::tip
+You may need to install `netcat` in order to send the `mntr` command to Keeper.
+Please see the [nmap.org](https://nmap.org/ncat/) page for download information.
+:::
+
+Run the command below from a shell on `clickhouse-keeper-01`, `clickhouse-keeper-02`, and
+`clickhouse-keeper-03` to check the status of each Keeper node. The command
+for `clickhouse-keeper-01` is shown below:
+
+```bash
+# run the pipe inside the container so that nc connects to Keeper's client port
+docker exec -it clickhouse-keeper-01 sh -c 'echo mntr | nc localhost 9181'
+```
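+
+To check all three Keeper nodes in one pass, the same command can be wrapped in a small
+loop. This is a sketch reusing the container names from this guide and assumes `nc` is
+available in the containers:
+
+```bash
+# Query each Keeper container in turn and print the role it currently holds
+for i in 01 02 03; do
+  echo "--- clickhouse-keeper-${i} ---"
+  docker exec clickhouse-keeper-${i} sh -c 'echo mntr | nc localhost 9181' | grep zk_server_state
+done
+```
+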
+
+Below is an example response from a follower node:
+
+```response title="Response"
+zk_version v23.3.1.2823-testing-46e85357ce2da2a99f56ee83a079e892d7ec3726
+zk_avg_latency 0
+zk_max_latency 0
+zk_min_latency 0
+zk_packets_received 0
+zk_packets_sent 0
+zk_num_alive_connections 0
+zk_outstanding_requests 0
+# highlight-next-line
+zk_server_state follower
+zk_znode_count 6
+zk_watch_count 0
+zk_ephemerals_count 0
+zk_approximate_data_size 1271
+zk_key_arena_size 4096
+zk_latest_snapshot_size 0
+zk_open_file_descriptor_count 46
+zk_max_file_descriptor_count 18446744073709551615
+```
+
+Below is an example response from the leader node:
+
+```response title="Response"
+zk_version v23.3.1.2823-testing-46e85357ce2da2a99f56ee83a079e892d7ec3726
+zk_avg_latency 0
+zk_max_latency 0
+zk_min_latency 0
+zk_packets_received 0
+zk_packets_sent 0
+zk_num_alive_connections 0
+zk_outstanding_requests 0
+# highlight-next-line
+zk_server_state leader
+zk_znode_count 6
+zk_watch_count 0
+zk_ephemerals_count 0
+zk_approximate_data_size 1271
+zk_key_arena_size 4096
+zk_latest_snapshot_size 0
+zk_open_file_descriptor_count 48
+zk_max_file_descriptor_count 18446744073709551615
+# highlight-start
+zk_followers 2
+zk_synced_followers 2
+# highlight-end
+```
\ No newline at end of file
diff --git a/docs/deployment-guides/terminology.md b/docs/deployment-guides/terminology.md
index de347e53a26..1c200db2379 100644
--- a/docs/deployment-guides/terminology.md
+++ b/docs/deployment-guides/terminology.md
@@ -8,17 +8,16 @@ description: 'Page with deployment examples that are based on the advice provide
import ReplicationShardingTerminology from '@site/docs/_snippets/_replication-sharding-terminology.md';
-These deployment examples are based on the advice provided to ClickHouse users by the ClickHouse Support and Services organization. These are working examples, and we recommend that you try them and then adjust them to suit your needs. You may find an example here that fits your requirements exactly. Alternatively, should you have a requirement where data is replicated three times instead of two, you should be able to add another replica by following the patterns presented here.
+The deployment examples in this section are based on the advice provided to ClickHouse users by
+the ClickHouse Support and Services organization. These are working examples, and
+we recommend that you try them and then adjust them to suit your needs. You may find
+an example here that fits your requirements exactly.
-
-
-## Examples {#examples}
-
-### Basic {#basic}
+We offer 'recipes' for a number of different topologies in the [example repo](https://github.com/ClickHouse/examples/tree/main/docker-compose-recipes/recipes)
+and recommend taking a look at them if the examples in this section do not fit your
+needs exactly.
-- The [**Scaling out**](/deployment-guides/horizontal-scaling.md) example shows how to shard your data across two nodes, and use a distributed table. This results in having data on two ClickHouse nodes. The two ClickHouse nodes also run ClickHouse Keeper providing distributed synchronization. A third node runs ClickHouse Keeper standalone to complete the ClickHouse Keeper quorum.
-
-- The [**Replication for fault tolerance**](/deployment-guides/replicated.md) example shows how to replicate your data across two nodes, and use a ReplicatedMergeTree table. This results in having data on two ClickHouse nodes. In addition to the two ClickHouse server nodes there are three ClickHouse Keeper standalone nodes to manage replication.
+
-
-### Intermediate {#intermediate}
-
-- Coming soon
-
-### Advanced {#advanced}
-
-- Coming soon
diff --git a/sidebars.js b/sidebars.js
index b094a739a79..dacc0f90598 100644
--- a/sidebars.js
+++ b/sidebars.js
@@ -1314,10 +1314,18 @@ const sidebars = {
link: { type: "doc", id: "deployment-guides/index" },
items: [
"deployment-guides/terminology",
- "deployment-guides/horizontal-scaling",
- "deployment-guides/replicated",
- "deployment-guides/parallel-replicas",
- "architecture/cluster-deployment",
+ {
+ type: "category",
+ label: "Examples",
+ collapsed: false,
+ items: [
+ {
+ type: "autogenerated",
+ dirName: "deployment-guides/replication-sharding-examples",
+ }
+ ]
+ },
+ "deployment-guides/parallel-replicas"
]
},
"guides/separation-storage-compute",
diff --git a/static/images/deployment-guides/architecture_1s_2r_3_nodes.png b/static/images/deployment-guides/architecture_1s_2r_3_nodes.png
deleted file mode 100644
index a8d39ee243636fa2853303bf8805fa498e0fffa5..0000000000000000000000000000000000000000
GIT binary patch
(binary image data omitted)