From ce34e46dcab8e47335746a9ee399be7a593a0589 Mon Sep 17 00:00:00 2001
From: Konstantin Bogdanov
Date: Mon, 21 Feb 2022 19:43:53 +0300
Subject: [PATCH 1/2] Update Helm chart

---
 README.md                       |   1 -
 deploy/ydb-operator/Chart.yaml  |   2 +-
 deploy/ydb-operator/README.md   |  29 +++--
 deploy/ydb-operator/values.yaml |  35 +++---
 samples/storage.yaml            | 214 ++------------------------------
 5 files changed, 43 insertions(+), 238 deletions(-)

diff --git a/README.md b/README.md
index 31d5731f..531b3d63 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,6 @@ The YDB Kubernetes operator deploys and manages Yandex Database resources on a K
 1. Helm 3.1.0+
 2. Kubernetes 1.20+.
 3. [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
-4. Support for ([Dynamic Volume Provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)).

 ## Limitations

diff --git a/deploy/ydb-operator/Chart.yaml b/deploy/ydb-operator/Chart.yaml
index 61c80d7c..ac632107 100644
--- a/deploy/ydb-operator/Chart.yaml
+++ b/deploy/ydb-operator/Chart.yaml
@@ -15,7 +15,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.2.0
+version: 0.3.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
diff --git a/deploy/ydb-operator/README.md b/deploy/ydb-operator/README.md
index 64409746..851d441a 100644
--- a/deploy/ydb-operator/README.md
+++ b/deploy/ydb-operator/README.md
@@ -1,22 +1,25 @@
 # YDB Kubernetes Operator Helm chart

-## Parameters
+## Add repo

-### Docker image configuration
+```console
+helm repo add ydb https://charts.ydb.tech
+helm repo update
+```

-| Name               | Description                               | Value                                     |
-| ------------------ | ----------------------------------------- | ----------------------------------------- |
-| `image.pullPolicy` | Политика скачивания образа | `IfNotPresent` |
-| `image.repository` | Image repository | `cr.yandex/crpbo4q9lbgkn85vr1rm/operator` |
-| `image.tag` | Image tag | `latest` |
-| `imagePullSecrets` | Secrets to use for Docker registry access | `[]` |
+_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._

+## Install Chart

-### Resource quotas
+```console
+# Helm
+$ helm install [RELEASE_NAME] ydb/operator
+```

-| Name                 | Description                                    | Value |
-| -------------------- | ---------------------------------------------- | ----- |
-| `resources.limits` | The resource limits for Operator container | `{}` |
-| `resources.requests` | The requested resources for Operator container | `{}` |
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments:
+
+```console
+helm show values ydb/operator
+```
\ No newline at end of file
diff --git a/deploy/ydb-operator/values.yaml b/deploy/ydb-operator/values.yaml
index cd4161a0..f07a3669 100644
--- a/deploy/ydb-operator/values.yaml
+++ b/deploy/ydb-operator/values.yaml
@@ -1,19 +1,14 @@
-## @section Docker image configuration
-##
-
-## YDB operator image
+## Docker image configuration
 ##
 image:
-  ## @param image.pullPolicy Operator container pull policy
+  ## Operator container pull policy
   ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
   ##
   pullPolicy: IfNotPresent
-  ## @param image.repository Operator image repository
   repository: cr.yandex/yc/ydb-kubernetes-operator
-  ## @param image.tag Operator image tag
-  tag: 0.2.0
+  tag: 0.4.3

-## @param imagePullSecrets Secrets to use for Docker registry access
+## Secrets to use for Docker registry access
 ## Secrets must be provided manually.
 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
 ## Example:
 ## imagePullSecrets:
 ##   - myRegistryKeySecretName
 ##
 imagePullSecrets: []

-## @skip nameOverride
 nameOverride: ""
-## @skip fullnameOverride
 fullnameOverride: ""

-## @section Resource quotas
+## Resource quotas
 ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
 resources:
-  ## @param resources.limits The resource limits for Operator container
-  ## Например:
+  ## The resource limits for Operator container
+  ## Example:
   ##  limits:
   ##    cpu: 250m
   ##    memory: 512Mi
   limits: {}
-  ## @param resources.requests The requested resources for Operator container
-  ## Например:
+  ## The requested resources for Operator container
+  ## Example:
   ##  requests:
   ##    cpu: 250m
   ##    memory: 256Mi
@@ -48,6 +41,8 @@ service:
   type: ClusterIP

 metrics:
+  ## Create ServiceMonitor resources
+  ##
   enabled: true

 webhook:
@@ -57,6 +52,10 @@ webhook:
     type: ClusterIP
     port: 9443

+  ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data.
+  ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own
+  ## certs ahead of time if you wish.
+  ##
   patch:
     enabled: true
     image:
@@ -74,7 +73,7 @@ webhook:
     tolerations: []

     ## SecurityContext holds pod-level security attributes and common container settings.
-    ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false
+    ## This defaults to non-root user with uid 2000 and gid 2000. *v1.PodSecurityContext false
     ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
     ##
     securityContext:
@@ -82,7 +81,7 @@ webhook:
       runAsNonRoot: true
       runAsUser: 2000

-  # Use certmanager to generate webhook certs
+  # Use cert-manager to generate webhook certs
   certManager:
     enabled: false
     # self-signed root certificate
diff --git a/samples/storage.yaml b/samples/storage.yaml
index 311eed78..6e3735b4 100644
--- a/samples/storage.yaml
+++ b/samples/storage.yaml
@@ -12,44 +12,16 @@ spec:
         storage: 80Gi
   # version: 21.4.30
   image:
-    name: cr.yandex/crpbo4q9lbgkn85vr1rm/ydb:yaml
-  nodes: 9
+    name: cr.yandex/crpl7ipeu79oseqhcgn2/ydb-oss:master
+  nodes: 8
   erasure: block-4-2
   configuration: |-
-    static_erasure: none
+    static_erasure: block-4-2
     host_configs:
-    - drive:
-      - kind: 0
-        path: /dev/kikimr_ssd_00
-        type: ROT
-      - kind: 0
-        path: /dev/kikimr_ssd_00
-        type: ROT
-      - kind: 0
-        path: /dev/kikimr_ssd_00
-        type: ROT
-      - kind: 0
-        path: /dev/kikimr_ssd_00
-        type: ROT
-      - kind: 0
-        path: /dev/kikimr_ssd_00
-        type: ROT
-      - kind: 0
-        path: /dev/kikimr_ssd_00
-        type: ROT
-      - kind: 0
-        path: /dev/kikimr_ssd_00
-        type: ROT
-      - kind: 0
-        path: /dev/kikimr_ssd_00
-        type: ROT
-      - kind: 0
-        path: /dev/kikimr_ssd_00
-        type: ROT
-      - kind: 0
-        path: /dev/kikimr_ssd_00
-        type: ROT
-      host_config_id: 1
+    - drive:
+      - path: /dev/kikimr_ssd_00
+        type: SSD
+      host_config_id: 1
     domains_config:
       domain:
       - name: root
         storage_pool_types:
         - kind: hdd
           pool_config:
             box_id: 1
             erasure_species: block-4-2
             kind: hdd
             pdisk_filter:
             - property:
-              - type: ROT
+              - type: SSD
             vdisk_kind: Default
       state_storage:
       - ring:
       service_set:
         availability_domains: 1
         groups:
-        - erasure_species: 3
+        - erasure_species: block-4-2
           group_id: 0
           group_generation: 1
           rings:
           - fail_domains:
             - vdisk_locations:
               - node_id: 1
-                pdisk_guid: 1
                 pdisk_id: 1
                 vdisk_slot_id: 0
             - vdisk_locations:
               - node_id: 2
-                pdisk_guid: 12
                 pdisk_id: 1
                 vdisk_slot_id: 0
             - vdisk_locations:
               - node_id: 3
-                pdisk_guid: 123
                 pdisk_id: 1
                 vdisk_slot_id: 0
             - vdisk_locations:
               - node_id: 4
-                pdisk_guid: 1234
                 pdisk_id: 1
                 vdisk_slot_id: 0
             - vdisk_locations:
               - node_id: 5
-                pdisk_guid: 12345
                 pdisk_id: 1
                 vdisk_slot_id: 0
             - vdisk_locations:
               - node_id: 6
-                pdisk_guid: 123456
                 pdisk_id: 1
                 vdisk_slot_id: 0
             - vdisk_locations:
               - node_id: 7
-                pdisk_guid: 1234567
                 pdisk_id: 1
                 vdisk_slot_id: 0
             - vdisk_locations:
               - node_id: 8
-                pdisk_guid: 12345678
                 pdisk_id: 1
                 vdisk_slot_id: 0
             - vdisk_locations:
               - node_id: 9
-                pdisk_guid: 123456789
                 pdisk_id: 1
                 vdisk_slot_id: 0
-        pdisks:
-        - node_id: 1
-          path: /dev/kikimr_ssd_00
-          pdisk_category: 0
-          pdisk_guid: 1
-          pdisk_id: 1
-        - node_id: 2
-          path: /dev/kikimr_ssd_00
-          pdisk_category: 0
-          pdisk_guid: 12
-          pdisk_id: 1
-        - node_id: 3
-          path: /dev/kikimr_ssd_00
-          pdisk_category: 0
-          pdisk_guid: 123
-          pdisk_id: 1
-        - node_id: 4
-          path: /dev/kikimr_ssd_00
-          pdisk_category: 0
-          pdisk_guid: 1234
-          pdisk_id: 1
-        - node_id: 5
-          path: /dev/kikimr_ssd_00
-          pdisk_category: 0
-          pdisk_guid: 12345
-          pdisk_id: 1
-        - node_id: 6
-          path: /dev/kikimr_ssd_00
-          pdisk_category: 0
-          pdisk_guid: 123456
-          pdisk_id: 1
-        - node_id: 7
-          path: /dev/kikimr_ssd_00
-          pdisk_category: 0
-          pdisk_guid: 1234567
-          pdisk_id: 1
-        - node_id: 8
-          path: /dev/kikimr_ssd_00
-          pdisk_category: 0
-          pdisk_guid: 12345678
-          pdisk_id: 1
-        - node_id: 9
-          path: /dev/kikimr_ssd_00
-          pdisk_category: 0
-          pdisk_guid: 123456789
-          pdisk_id: 1
-        vdisks:
-        - vdisk_id:
-            domain: 0
-            group_generation: 1
-            group_id: 0
-            ring: 0
-            vdisk: 0
-          vdisk_location:
-            node_id: 1
-            pdisk_guid: 1
-            pdisk_id: 1
-            vdisk_slot_id: 0
-        - vdisk_id:
-            domain: 1
-            group_generation: 1
-            group_id: 0
-            ring: 0
-            vdisk: 0
-          vdisk_location:
-            node_id: 2
-            pdisk_guid: 12
-            pdisk_id: 1
-            vdisk_slot_id: 0
-        - vdisk_id:
-            domain: 2
-            group_generation: 1
-            group_id: 0
-            ring: 0
-            vdisk: 0
-          vdisk_location:
-            node_id: 3
-            pdisk_guid: 123
-            pdisk_id: 1
-            vdisk_slot_id: 0
-        - vdisk_id:
-            domain: 3
-            group_generation: 1
-            group_id: 0
-            ring: 0
-            vdisk: 0
-          vdisk_location:
-            node_id: 4
-            pdisk_guid: 1234
-            pdisk_id: 1
-            vdisk_slot_id: 0
-        - vdisk_id:
-            domain: 4
-            group_generation: 1
-            group_id: 0
-            ring: 0
-            vdisk: 0
-          vdisk_location:
-            node_id: 5
-            pdisk_guid: 12345
-            pdisk_id: 1
-            vdisk_slot_id: 0
-        - vdisk_id:
-            domain: 5
-            group_generation: 1
-            group_id: 0
-            ring: 0
-            vdisk: 0
-          vdisk_location:
-            node_id: 6
-            pdisk_guid: 123456
-            pdisk_id: 1
-            vdisk_slot_id: 0
-        - vdisk_id:
-            domain: 6
-            group_generation: 1
-            group_id: 0
-            ring: 0
-            vdisk: 0
-          vdisk_location:
-            node_id: 7
-            pdisk_guid: 1234567
-            pdisk_id: 1
-            vdisk_slot_id: 0
-        - vdisk_id:
-            domain: 7
-            group_generation: 1
-            group_id: 0
-            ring: 0
-            vdisk: 0
-          vdisk_location:
-            node_id: 8
-            pdisk_guid: 12345678
-            pdisk_id: 1
-            vdisk_slot_id: 0
-        - vdisk_id:
-            domain: 8
-            group_generation: 1
-            group_id: 0
-            ring: 0
-            vdisk: 0
-          vdisk_location:
-            node_id: 9
-            pdisk_guid: 123456789
-            pdisk_id: 1
-            vdisk_slot_id: 0
     channel_profile_config:
       profile:
       - channel:

From 597400a819b7431a3bff31e59f3086509652af70 Mon Sep 17 00:00:00 2001
From: Konstantin Bogdanov
Date: Mon, 21 Feb 2022 20:20:08 +0300
Subject: [PATCH 2/2] Use erasure `mirror-3-dc` in Storage example

---
 samples/database.yaml |   3 +-
 samples/storage.yaml  | 144 +++++++++++++++++------------------------
 2 files changed, 59 insertions(+), 88 deletions(-)

diff --git a/samples/database.yaml b/samples/database.yaml
index bd0fa7fa..80087b85 100644
--- a/samples/database.yaml
+++ b/samples/database.yaml
@@ -3,7 +3,8 @@ kind: Database
 metadata:
   name: database-sample
 spec:
-  version: 21.4.30
+  image:
+    name: cr.yandex/crpl7ipeu79oseqhcgn2/ydb-oss:master
   nodes: 6
   resources:
     containerResources:
diff --git a/samples/storage.yaml b/samples/storage.yaml
index 6e3735b4..7ca79802 100644
--- a/samples/storage.yaml
+++ b/samples/storage.yaml
@@ -10,27 +10,28 @@ spec:
     resources:
       requests:
         storage: 80Gi
-  # version: 21.4.30
   image:
     name: cr.yandex/crpl7ipeu79oseqhcgn2/ydb-oss:master
-  nodes: 8
-  erasure: block-4-2
+  nodes: 9
+  erasure: mirror-3-dc
   configuration: |-
-    static_erasure: block-4-2
+    static_erasure: mirror-3-dc
     host_configs:
     - drive:
       - path: /dev/kikimr_ssd_00
         type: SSD
       host_config_id: 1
+    grpc_config:
+      port: 2135
     domains_config:
       domain:
       - name: root
         storage_pool_types:
-        - kind: hdd
+        - kind: ssd
           pool_config:
             box_id: 1
-            erasure_species: block-4-2
-            kind: hdd
+            erasure_species: mirror-3-dc
+            kind: ssd
             pdisk_filter:
             - property:
               - type: SSD
             vdisk_kind: Default
       state_storage:
       - ring:
       service_set:
         availability_domains: 1
         groups:
-        - erasure_species: block-4-2
+        - erasure_species: mirror-3-dc
           group_id: 0
           group_generation: 1
           rings:
           - fail_domains:
-            - vdisk_locations:
-              - node_id: 1
-                pdisk_id: 1
-                vdisk_slot_id: 0
-            - vdisk_locations:
-              - node_id: 2
-                pdisk_id: 1
-                vdisk_slot_id: 0
-            - vdisk_locations:
-              - node_id: 3
-                pdisk_id: 1
-                vdisk_slot_id: 0
-            - vdisk_locations:
-              - node_id: 4
-                pdisk_id: 1
-                vdisk_slot_id: 0
-            - vdisk_locations:
-              - node_id: 5
-                pdisk_id: 1
-                vdisk_slot_id: 0
-            - vdisk_locations:
-              - node_id: 6
-                pdisk_id: 1
-                vdisk_slot_id: 0
-            - vdisk_locations:
-              - node_id: 7
-                pdisk_id: 1
-                vdisk_slot_id: 0
-            - vdisk_locations:
-              - node_id: 8
-                pdisk_id: 1
-                vdisk_slot_id: 0
-            - vdisk_locations:
-              - node_id: 9
-                pdisk_id: 1
-                vdisk_slot_id: 0
+            - vdisk_locations:
+              - node_id: 1
+                pdisk_category: SSD
+                path: /dev/kikimr_ssd_00
+            - vdisk_locations:
+              - node_id: 2
+                pdisk_category: SSD
+                path: /dev/kikimr_ssd_00
+            - vdisk_locations:
+              - node_id: 3
+                pdisk_category: SSD
+                path: /dev/kikimr_ssd_00
+          - fail_domains:
+            - vdisk_locations:
+              - node_id: 4
+                pdisk_category: SSD
+                path: /dev/kikimr_ssd_00
+            - vdisk_locations:
+              - node_id: 5
+                pdisk_category: SSD
+                path: /dev/kikimr_ssd_00
+            - vdisk_locations:
+              - node_id: 6
+                pdisk_category: SSD
+                path: /dev/kikimr_ssd_00
+          - fail_domains:
+            - vdisk_locations:
+              - node_id: 7
+                pdisk_category: SSD
+                path: /dev/kikimr_ssd_00
+            - vdisk_locations:
+              - node_id: 8
+                pdisk_category: SSD
+                path: /dev/kikimr_ssd_00
+            - vdisk_locations:
+              - node_id: 9
+                pdisk_category: SSD
+                path: /dev/kikimr_ssd_00
     channel_profile_config:
       profile:
       - channel:
-        - erasure_species: block-4-2
-          pdisk_category: 0
-          storage_pool_kind: hdd
-        - erasure_species: block-4-2
-          pdisk_category: 0
-          storage_pool_kind: hdd
-        - erasure_species: block-4-2
-          pdisk_category: 0
-          storage_pool_kind: hdd
-        profile_id: 0
-      - channel:
-        - erasure_species: block-4-2
-          pdisk_category: 0
-          storage_pool_kind: hdd
-        - erasure_species: block-4-2
-          pdisk_category: 0
-          storage_pool_kind: hdd
-        - erasure_species: block-4-2
-          pdisk_category: 0
-          storage_pool_kind: hdd
-        profile_id: 1
-      - channel:
-        - erasure_species: block-4-2
-          pdisk_category: 0
-          storage_pool_kind: hdd
-        - erasure_species: block-4-2
-          pdisk_category: 0
-          storage_pool_kind: hdd
-        - erasure_species: block-4-2
-          pdisk_category: 0
-          storage_pool_kind: hdd
-        profile_id: 2
-      - channel:
-        - erasure_species: block-4-2
-          pdisk_category: 0
-          storage_pool_kind: hdd
-        - erasure_species: block-4-2
-          pdisk_category: 0
-          storage_pool_kind: hdd
-        - erasure_species: block-4-2
-          pdisk_category: 0
-          storage_pool_kind: hdd
-        profile_id: 3
\ No newline at end of file
+        - erasure_species: mirror-3-dc
+          pdisk_category: 1
+          storage_pool_kind: ssd
+        - erasure_species: mirror-3-dc
+          pdisk_category: 1
+          storage_pool_kind: ssd
+        - erasure_species: mirror-3-dc
+          pdisk_category: 1
+          storage_pool_kind: ssd
+        profile_id: 0
\ No newline at end of file