diff --git a/manifests/ansible/kubespray b/manifests/ansible/kubespray index 514d19d1d..6cc0e8441 160000 --- a/manifests/ansible/kubespray +++ b/manifests/ansible/kubespray @@ -1 +1 @@ -Subproject commit 514d19d1d09dff13ce1066b626a1ea47bceb898d +Subproject commit 6cc0e844163718afbf543286624bb7ef35a40209 diff --git a/manifests/ansible/kubespray_2_17_0 b/manifests/ansible/kubespray_2_17_0 index 54bdfabff..49c1f245a 160000 --- a/manifests/ansible/kubespray_2_17_0 +++ b/manifests/ansible/kubespray_2_17_0 @@ -1 +1 @@ -Subproject commit 54bdfabff4a53c5383aba944c4866a008b1eb4f1 +Subproject commit 49c1f245a27c623be5ab9835de950e320ff7af51 diff --git a/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl b/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl index d27abe983..9b4076f10 100755 --- a/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl +++ b/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl @@ -1,371 +1,371 @@ -{{- define "grafana.pod" -}} -{{- if .Values.schedulerName }} -schedulerName: "{{ .Values.schedulerName }}" -{{- end }} -serviceAccountName: {{ template "grafana.serviceAccountName" . }} -{{- if .Values.schedulerName }} -schedulerName: "{{ .Values.schedulerName }}" -{{- end }} -{{- if .Values.securityContext }} -securityContext: -{{ toYaml .Values.securityContext | indent 2 }} -{{- end }} -{{- if .Values.priorityClassName }} -priorityClassName: {{ .Values.priorityClassName }} -{{- end }} -{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.extraInitContainers) }} -initContainers: -{{- end }} -{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} - - name: init-chown-data - image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" - imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} - securityContext: - runAsUser: 0 - command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsUser }}", "/var/lib/grafana"] - resources: -{{ toYaml .Values.initChownData.resources | indent 6 }} - volumeMounts: - - name: storage - mountPath: "/var/lib/grafana" -{{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} -{{- end }} -{{- end }} -{{- if .Values.dashboards }} - - name: download-dashboards - image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" - imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} - command: ["/bin/sh"] - args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh /etc/grafana/download_dashboards.sh" ] - resources: -{{ toYaml .Values.downloadDashboards.resources | indent 6 }} - env: -{{- range $key, $value := .Values.downloadDashboards.env }} - - name: "{{ $key }}" - value: "{{ $value }}" -{{- end }} - volumeMounts: - - name: config - mountPath: "/etc/grafana/download_dashboards.sh" - subPath: download_dashboards.sh - - name: storage - mountPath: "/var/lib/grafana" -{{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} -{{- end }} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - {{- end }} -{{- end }} -{{- if .Values.sidecar.datasources.enabled }} - - name: {{ template "grafana.name" . 
}}-sc-datasources - image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - - name: METHOD - value: LIST - - name: LABEL - value: "{{ .Values.sidecar.datasources.label }}" - - name: FOLDER - value: "/etc/grafana/provisioning/datasources" - - name: RESOURCE - value: "both" - {{- if .Values.sidecar.datasources.searchNamespace }} - - name: NAMESPACE - value: "{{ .Values.sidecar.datasources.searchNamespace }}" - {{- end }} - {{- if .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ .Values.sidecar.skipTlsVerify }}" - {{- end }} - resources: -{{ toYaml .Values.sidecar.resources | indent 6 }} - volumeMounts: - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end}} -{{- if .Values.extraInitContainers }} -{{ toYaml .Values.extraInitContainers | indent 2 }} -{{- end }} -{{- if .Values.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . }} -{{- end}} -{{- end }} -containers: -{{- if .Values.sidecar.dashboards.enabled }} - - name: {{ template "grafana.name" . }}-sc-dashboard - image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag}}" - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - - name: METHOD - value: {{ .Values.sidecar.dashboards.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.dashboards.label }}" - - name: FOLDER - value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" - - name: RESOURCE - value: "both" - {{- if .Values.sidecar.dashboards.searchNamespace }} - - name: NAMESPACE - value: "{{ .Values.sidecar.dashboards.searchNamespace }}" - {{- end }} - {{- if .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ .Values.sidecar.skipTlsVerify }}" - {{- end }} - resources: -{{ toYaml .Values.sidecar.resources | indent 6 }} - volumeMounts: - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} -{{- end}} - - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if .Values.command }} - command: - {{- range .Values.command }} - - {{ . }} - {{- end }} - {{- end}} - volumeMounts: - - name: config - mountPath: "/etc/grafana/grafana.ini" - subPath: grafana.ini - {{- if .Values.ldap.enabled }} - - name: ldap - mountPath: "/etc/grafana/ldap.toml" - subPath: ldap.toml - {{- end }} - {{- range .Values.extraConfigmapMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath | default "" }} - readOnly: {{ .readOnly }} - {{- end }} - - name: storage - mountPath: "/var/lib/grafana" -{{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} -{{- end }} -{{- if .Values.dashboards }} -{{- range $provider, $dashboards := .Values.dashboards }} -{{- range $key, $value := $dashboards }} -{{- if (or (hasKey $value "json") (hasKey $value "file")) }} - - name: dashboards-{{ $provider }} - mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" - subPath: "{{ $key }}.json" -{{- end }} -{{- end }} -{{- end }} -{{- end -}} -{{- if .Values.dashboardsConfigMaps }} -{{- range (keys .Values.dashboardsConfigMaps | sortAlpha) }} - - name: dashboards-{{ . }} - mountPath: "/var/lib/grafana/dashboards/{{ . 
}}" -{{- end }} -{{- end }} -{{- if .Values.datasources }} - - name: config - mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml" - subPath: datasources.yaml -{{- end }} -{{- if .Values.notifiers }} - - name: config - mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml" - subPath: notifiers.yaml -{{- end }} -{{- if .Values.dashboardProviders }} - - name: config - mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml" - subPath: dashboardproviders.yaml -{{- end }} -{{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} -{{ if .Values.sidecar.dashboards.SCProvider }} - - name: sc-dashboard-provider - mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" - subPath: provider.yaml -{{- end}} -{{- end}} -{{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end}} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.extraVolumeMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath | default "" }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - {{- end }} - ports: - - name: {{ .Values.service.portName }} - containerPort: {{ .Values.service.port }} - protocol: TCP - - name: {{ .Values.podPortName }} - containerPort: 3000 - protocol: TCP - env: - {{- if not .Values.env.GF_SECURITY_ADMIN_USER }} - - name: GF_SECURITY_ADMIN_USER - valueFrom: - secretKeyRef: - name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) }} - - name: GF_SECURITY_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if .Values.plugins }} - - name: GF_INSTALL_PLUGINS - valueFrom: - configMapKeyRef: - name: {{ template "grafana.fullname" . }} - key: plugins - {{- end }} - {{- if .Values.smtp.existingSecret }} - - name: GF_SMTP_USER - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.userKey | default "user" }} - - name: GF_SMTP_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.passwordKey | default "password" }} - {{- end }} - {{- range $key, $value := .Values.envValueFrom }} - - name: {{ $key | quote }} - valueFrom: -{{ toYaml $value | indent 10 }} - {{- end }} -{{- range $key, $value := .Values.env }} - - name: "{{ $key }}" - value: "{{ $value }}" -{{- end }} - {{- if .Values.envFromSecret }} - envFrom: - - secretRef: - name: {{ .Values.envFromSecret }} - {{- end }} - {{- if .Values.envRenderSecret }} - envFrom: - - secretRef: - name: {{ template "grafana.fullname" . }}-env - {{- end }} - livenessProbe: -{{ toYaml .Values.livenessProbe | indent 6 }} - readinessProbe: -{{ toYaml .Values.readinessProbe | indent 6 }} - resources: -{{ toYaml .Values.resources | indent 6 }} -{{- with .Values.extraContainers }} -{{ tpl . $ | indent 2 }} -{{- end }} -{{- with .Values.nodeSelector }} -nodeSelector: -{{ toYaml . 
| indent 2 }} -{{- end }} -{{- with .Values.affinity }} -affinity: -{{ toYaml . | indent 2 }} -{{- end }} -{{- with .Values.tolerations }} -tolerations: -{{ toYaml . | indent 2 }} -{{- end }} -volumes: - - name: config - configMap: - name: {{ template "grafana.fullname" . }} -{{- range .Values.extraConfigmapMounts }} - - name: {{ .name }} - configMap: - name: {{ .configMap }} -{{- end }} - {{- if .Values.dashboards }} - {{- range (keys .Values.dashboards | sortAlpha) }} - - name: dashboards-{{ . }} - configMap: - name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }} - {{- end }} - {{- end }} - {{- if .Values.dashboardsConfigMaps }} - {{ $root := . }} - {{- range $provider, $name := .Values.dashboardsConfigMaps }} - - name: dashboards-{{ $provider }} - configMap: - name: {{ tpl $name $root }} - {{- end }} - {{- end }} - {{- if .Values.ldap.enabled }} - - name: ldap - secret: - {{- if .Values.ldap.existingSecret }} - secretName: {{ .Values.ldap.existingSecret }} - {{- else }} - secretName: {{ template "grafana.fullname" . }} - {{- end }} - items: - - key: ldap-toml - path: ldap.toml - {{- end }} -{{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} - - name: storage - persistentVolumeClaim: - claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }} -{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }} -# nothing -{{- else }} - - name: storage - emptyDir: {} -{{- end -}} -{{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - emptyDir: {} -{{- if .Values.sidecar.dashboards.SCProvider }} - - name: sc-dashboard-provider - configMap: - name: {{ template "grafana.fullname" . }}-config-dashboards -{{- end }} -{{- end }} -{{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - emptyDir: {} -{{- end -}} -{{- range .Values.extraSecretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - defaultMode: {{ .defaultMode }} -{{- end }} -{{- range .Values.extraVolumeMounts }} - - name: {{ .name }} - persistentVolumeClaim: - claimName: {{ .existingClaim }} -{{- end }} -{{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - emptyDir: {} -{{- end -}} -{{- end }} +{{- define "grafana.pod" -}} +{{- if .Values.schedulerName }} +schedulerName: "{{ .Values.schedulerName }}" +{{- end }} +serviceAccountName: {{ template "grafana.serviceAccountName" . 
}} +{{- if .Values.schedulerName }} +schedulerName: "{{ .Values.schedulerName }}" +{{- end }} +{{- if .Values.securityContext }} +securityContext: +{{ toYaml .Values.securityContext | indent 2 }} +{{- end }} +{{- if .Values.priorityClassName }} +priorityClassName: {{ .Values.priorityClassName }} +{{- end }} +{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.extraInitContainers) }} +initContainers: +{{- end }} +{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} + - name: init-chown-data + image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" + imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} + securityContext: + runAsUser: 0 + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsUser }}", "/var/lib/grafana"] + resources: +{{ toYaml .Values.initChownData.resources | indent 6 }} + volumeMounts: + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- end }} +{{- if .Values.dashboards }} + - name: download-dashboards + image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" + imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} + command: ["/bin/sh"] + args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh /etc/grafana/download_dashboards.sh" ] + resources: +{{ toYaml .Values.downloadDashboards.resources | indent 6 }} + env: +{{- range $key, $value := .Values.downloadDashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" +{{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/download_dashboards.sh" + subPath: download_dashboards.sh + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: {{ template "grafana.name" . }}-sc-datasources + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: "both" + {{- if .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.datasources.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.extraInitContainers }} +{{ toYaml .Values.extraInitContainers | indent 2 }} +{{- end }} +{{- if .Values.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end}} +{{- end }} +containers: +{{- if .Values.sidecar.dashboards.enabled }} + - name: {{ template "grafana.name" . 
}}-sc-dashboard + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag}}" + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: {{ .Values.sidecar.dashboards.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.dashboards.label }}" + - name: FOLDER + value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" + - name: RESOURCE + value: "both" + {{- if .Values.sidecar.dashboards.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.dashboards.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{- end}} + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ . }} + {{- end }} + {{- end}} + volumeMounts: + - name: config + mountPath: "/etc/grafana/grafana.ini" + subPath: grafana.ini + {{- if .Values.ldap.enabled }} + - name: ldap + mountPath: "/etc/grafana/ldap.toml" + subPath: ldap.toml + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- if .Values.dashboards }} +{{- range $provider, $dashboards := .Values.dashboards }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} + - name: dashboards-{{ $provider }} + mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + subPath: "{{ $key }}.json" +{{- end }} +{{- end }} +{{- end }} +{{- end -}} +{{- if .Values.dashboardsConfigMaps }} +{{- range (keys .Values.dashboardsConfigMaps | sortAlpha) }} + - name: dashboards-{{ . }} + mountPath: "/var/lib/grafana/dashboards/{{ . 
}}" +{{- end }} +{{- end }} +{{- if .Values.datasources }} + - name: config + mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml" + subPath: datasources.yaml +{{- end }} +{{- if .Values.notifiers }} + - name: config + mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml" + subPath: notifiers.yaml +{{- end }} +{{- if .Values.dashboardProviders }} + - name: config + mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml" + subPath: dashboardproviders.yaml +{{- end }} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{ if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" + subPath: provider.yaml +{{- end}} +{{- end}} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + containerPort: {{ .Values.service.port }} + protocol: TCP + - name: {{ .Values.podPortName }} + containerPort: 3000 + protocol: TCP + env: + {{- if not .Values.env.GF_SECURITY_ADMIN_USER }} + - name: GF_SECURITY_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) }} + - name: GF_SECURITY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if .Values.plugins }} + - name: GF_INSTALL_PLUGINS + valueFrom: + configMapKeyRef: + name: {{ template "grafana.fullname" . }} + key: plugins + {{- end }} + {{- if .Values.smtp.existingSecret }} + - name: GF_SMTP_USER + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.userKey | default "user" }} + - name: GF_SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.passwordKey | default "password" }} + {{- end }} + {{- range $key, $value := .Values.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: +{{ toYaml $value | indent 10 }} + {{- end }} +{{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" +{{- end }} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ .Values.envFromSecret }} + {{- end }} + {{- if .Values.envRenderSecret }} + envFrom: + - secretRef: + name: {{ template "grafana.fullname" . }}-env + {{- end }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 6 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 6 }} + resources: +{{ toYaml .Values.resources | indent 6 }} +{{- with .Values.extraContainers }} +{{ tpl . $ | indent 2 }} +{{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: +{{ toYaml . 
| indent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: +{{ toYaml . | indent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: +{{ toYaml . | indent 2 }} +{{- end }} +volumes: + - name: config + configMap: + name: {{ template "grafana.fullname" . }} +{{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} +{{- end }} + {{- if .Values.dashboards }} + {{- range (keys .Values.dashboards | sortAlpha) }} + - name: dashboards-{{ . }} + configMap: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }} + {{- end }} + {{- end }} + {{- if .Values.dashboardsConfigMaps }} + {{ $root := . }} + {{- range $provider, $name := .Values.dashboardsConfigMaps }} + - name: dashboards-{{ $provider }} + configMap: + name: {{ tpl $name $root }} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: ldap + secret: + {{- if .Values.ldap.existingSecret }} + secretName: {{ .Values.ldap.existingSecret }} + {{- else }} + secretName: {{ template "grafana.fullname" . }} + {{- end }} + items: + - key: ldap-toml + path: ldap.toml + {{- end }} +{{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} + - name: storage + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }} +{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }} +# nothing +{{- else }} + - name: storage + emptyDir: {} +{{- end -}} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + emptyDir: {} +{{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + configMap: + name: {{ template "grafana.fullname" . }}-config-dashboards +{{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + emptyDir: {} +{{- end -}} +{{- range .Values.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} +{{- end }} +{{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + persistentVolumeClaim: + claimName: {{ .existingClaim }} +{{- end }} +{{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + emptyDir: {} +{{- end -}} +{{- end }} diff --git a/manifests/helm/monitor-stack/charts/grafana/values.yaml b/manifests/helm/monitor-stack/charts/grafana/values.yaml index 39d0bbb1e..b55ace7eb 100755 --- a/manifests/helm/monitor-stack/charts/grafana/values.yaml +++ b/manifests/helm/monitor-stack/charts/grafana/values.yaml @@ -506,4 +506,4 @@ sidecar: ## Override the deployment namespace ## -namespaceOverride: "" +namespaceOverride: "" \ No newline at end of file diff --git a/manifests/helm/monitor-stack/values.yaml b/manifests/helm/monitor-stack/values.yaml index 79cc4af17..9e97584d3 100644 --- a/manifests/helm/monitor-stack/values.yaml +++ b/manifests/helm/monitor-stack/values.yaml @@ -1,2004 +1,2004 @@ -# Default values for prometheus-operator. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
- -## Provide a name in place of prometheus-operator for `app:` labels -## -nameOverride: "" - -## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6 -## -kubeTargetVersionOverride: "" - -## Provide a name to substitute for the full names of resources -## -fullnameOverride: "" - -## Labels to apply to all resources -## -commonLabels: {} -# scmhash: abc123 -# myLabel: aakkmd - -## Create default rules for monitoring the cluster -## -defaultRules: - create: true - rules: - alertmanager: true - etcd: true - general: true - k8s: true - kubeApiserver: true - kubeApiserverError: true - kubePrometheusNodeAlerting: true - kubePrometheusNodeRecording: true - kubernetesAbsent: true - kubernetesApps: true - kubernetesResources: true - kubernetesStorage: true - kubernetesSystem: true - kubeScheduler: true - network: true - node: true - prometheus: true - prometheusOperator: true - time: true - - ## Runbook url prefix for default rules - runbookUrl: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md# - ## Reduce app namespace alert scope - appNamespacesTarget: ".*" - - ## Labels for default rules - labels: {} - ## Annotations for default rules - annotations: {} - -## Provide custom recording or alerting rules to be deployed into the cluster. -## -additionalPrometheusRules: [] -# - name: my-rule-file -# groups: -# - name: my_group -# rules: -# - record: my_record -# expr: 100 * my_record - -## -global: - rbac: - create: true - pspEnabled: true - pspAnnotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## Reference to one or more secrets to be used when pulling images - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - imagePullSecrets: [] - # - name: "image-pull-secret" - -## Configuration for alertmanager -## ref: https://prometheus.io/docs/alerting/alertmanager/ -## -alertmanager: - - ## Deploy alertmanager - ## - enabled: true - - ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2 - ## - apiVersion: v2 - - ## Service account for Alertmanager to use. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - create: true - name: "" - - ## Configure pod disruption budgets for Alertmanager - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget - ## This configuration is immutable once created and will require the PDB to be deleted to be changed - ## https://github.com/kubernetes/kubernetes/issues/45398 - ## - podDisruptionBudget: - enabled: false - minAvailable: 1 - maxUnavailable: "" - - ## Alertmanager configuration directives - ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file - ## https://prometheus.io/webtools/alerting/routing-tree-editor/ - ## - config: - global: - resolve_timeout: 5m - route: - group_by: ['job'] - group_wait: 30s - group_interval: 5m - repeat_interval: 12h - receiver: 'null' - routes: - - match: - alertname: Watchdog - receiver: 'null' - receivers: - - name: 'null' - - ## Pass the Alertmanager configuration directives through Helm's templating - ## engine. If the Alertmanager configuration contains Alertmanager templates, - ## they'll need to be properly escaped so that they are not interpreted by - ## Helm - ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function - ## https://prometheus.io/docs/alerting/configuration/#%3Ctmpl_string%3E - ## https://prometheus.io/docs/alerting/notifications/ - ## https://prometheus.io/docs/alerting/notification_examples/ - tplConfig: false - - ## Alertmanager template files to format alerts - ## ref: https://prometheus.io/docs/alerting/notifications/ - ## https://prometheus.io/docs/alerting/notification_examples/ - ## - templateFiles: {} - # - ## An example template: - # template_1.tmpl: |- - # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }} - # - # {{ define "slack.myorg.text" }} - # {{- $root := . -}} - # {{ range .Alerts }} - # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}` - # *Cluster:* {{ template "cluster" $root }} - # *Description:* {{ .Annotations.description }} - # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:> - # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:> - # *Details:* - # {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}` - # {{ end }} - - ingress: - enabled: false - - annotations: {} - - labels: {} - - ## Hosts must be provided if Ingress is enabled. 
- ## - hosts: [] - # - alertmanager.domain.com - - ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix - ## - paths: [] - # - / - - ## TLS configuration for Alertmanager Ingress - ## Secret must be manually created in the namespace - ## - tls: [] - # - secretName: alertmanager-general-tls - # hosts: - # - alertmanager.example.com - - ## Configuration for Alertmanager secret - ## - secret: - annotations: {} - - ## Configuration for creating an Ingress that will map to each Alertmanager replica service - ## alertmanager.servicePerReplica must be enabled - ## - ingressPerReplica: - enabled: false - annotations: {} - labels: {} - - ## Final form of the hostname for each per replica ingress is - ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} - ## - ## Prefix for the per replica ingress that will have `-$replicaNumber` - ## appended to the end - hostPrefix: "" - ## Domain that will be used for the per replica ingress - hostDomain: "" - - ## Paths to use for ingress rules - ## - paths: [] - # - / - - ## Secret name containing the TLS certificate for alertmanager per replica ingress - ## Secret must be manually created in the namespace - tlsSecretName: "" - - ## Separated secret for each per replica Ingress. Can be used together with cert-manager - ## - tlsSecretPerReplica: - enabled: false - ## Final form of the secret for each per replica ingress is - ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} - ## - prefix: "alertmanager" - - ## Configuration for Alertmanager service - ## - service: - annotations: {} - labels: {} - clusterIP: "" - - ## Port for Alertmanager Service to listen on - ## - port: 9093 - ## To be used with a proxy extraContainer port - ## - targetPort: 9093 - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 30903 - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - loadBalancerIP: "" - loadBalancerSourceRanges: [] - ## Service type - ## - type: ClusterIP - - ## Configuration for creating a separate Service for each statefulset Alertmanager replica - ## - servicePerReplica: - enabled: false - annotations: {} - - ## Port for Alertmanager Service per replica to listen on - ## - port: 9093 - - ## To be used with a proxy extraContainer port - targetPort: 9093 - - ## Port to expose on each node - ## Only used if servicePerReplica.type is 'NodePort' - ## - nodePort: 30904 - - ## Loadbalancer source IP ranges - ## Only used if servicePerReplica.type is "loadbalancer" - loadBalancerSourceRanges: [] - ## Service type - ## - type: ClusterIP - - ## If true, create a serviceMonitor for alertmanager - ## - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - selfMonitor: true - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. 
- ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Settings affecting alertmanagerSpec - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec - ## - alertmanagerSpec: - ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata - ## Metadata Labels and Annotations gets propagated to the Alertmanager pods. - ## - podMetadata: {} - - ## Image of Alertmanager - ## - image: - repository: quay.io/prometheus/alertmanager - tag: v0.20.0 - - ## If true then the user will be responsible to provide a secret with alertmanager configuration - ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used - ## - useExistingSecret: false - - ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the - ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/. - ## - secrets: [] - - ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. - ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/. - ## - configMaps: [] - - ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for - ## this Alertmanager instance. Defaults to 'alertmanager-' The secret is mounted into /etc/alertmanager/config. - ## - # configSecret: - - ## Define Log Format - # Use logfmt (default) or json-formatted logging - logFormat: logfmt - - ## Log level for Alertmanager to be configured with. - ## - logLevel: info - - ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the - ## running cluster equal to the expected size. - replicas: 1 - - ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression - ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours). - ## - retention: 120h - - ## Storage is the definition of how storage will be used by the Alertmanager instances. - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md - ## - storage: {} - # volumeClaimTemplate: - # spec: - # storageClassName: gluster - # accessModes: ["ReadWriteOnce"] - # resources: - # requests: - # storage: 50Gi - # selector: {} - - - ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. string false - ## - externalUrl: - - ## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, - ## but the server serves requests under a different route prefix. For example for use with kubectl proxy. - ## - routePrefix: / - - ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions. - ## - paused: false - - ## Define which Nodes the Pods are scheduled on. - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Define resources requests and limits for single Pods. 
- ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # requests: - # memory: 400Mi - - ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. - ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. - ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. - ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. - ## - podAntiAffinity: "" - - ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity. - ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone - ## - podAntiAffinityTopologyKey: kubernetes.io/hostname - - ## Assign custom affinity rules to the alertmanager instance - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - affinity: {} - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/e2e-az-name - # operator: In - # values: - # - e2e-az1 - # - e2e-az2 - - ## If specified, the pod's tolerations. - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal" - # value: "value" - # effect: "NoSchedule" - - ## SecurityContext holds pod-level security attributes and common container settings. - ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - securityContext: - runAsNonRoot: true - runAsUser: 1000 - fsGroup: 2000 - - ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. - ## Note this is only for the Alertmanager UI, not the gossip communication. - ## - listenLocal: false - - ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. - ## - containers: [] - - ## Priority class assigned to the Pods - ## - priorityClassName: "" - - ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. - ## - additionalPeers: [] - - ## PortName to use for Alert Manager. - ## - portName: "web" - - -## Using default values from https://github.com/helm/charts/blob/master/stable/grafana/values.yaml -## -grafana: - enabled: true - - ## Deploy default dashboards. - ## - defaultDashboardsEnabled: true - - adminPassword: prom-operator - - ingress: - ## If true, Grafana Ingress will be created - ## - enabled: false - - ## Annotations for Grafana Ingress - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - - ## Labels to be added to the Ingress - ## - labels: {} - - ## Hostnames. - ## Must be provided if Ingress is enable. 
- ## - # hosts: - # - grafana.domain.com - hosts: [] - - ## Path for grafana ingress - path: / - - ## TLS configuration for grafana Ingress - ## Secret must be manually created in the namespace - ## - tls: [] - # - secretName: grafana-general-tls - # hosts: - # - grafana.example.com - - sidecar: - dashboards: - enabled: true - label: grafana_dashboard - datasources: - enabled: true - defaultDatasourceEnabled: true - - ## Annotations for Grafana datasource configmaps - ## - annotations: {} - - ## Create datasource for each Pod of Prometheus StatefulSet; - ## this uses headless service `prometheus-operated` which is - ## created by Prometheus Operator - ## ref: https://git.io/fjaBS - createPrometheusReplicasDatasources: false - label: grafana_datasource - - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /etc/grafana/ssl/ - # configMap: certs-configmap - # readOnly: true - - ## Configure additional grafana datasources - ## ref: http://docs.grafana.org/administration/provisioning/#datasources - additionalDataSources: [] - # - name: prometheus-sample - # access: proxy - # basicAuth: true - # basicAuthPassword: pass - # basicAuthUser: daco - # editable: false - # jsonData: - # tlsSkipVerify: true - # orgId: 1 - # type: prometheus - # url: https://prometheus.svc:9090 - # version: 1 - - ## Passed to grafana subchart and used by servicemonitor below - ## - service: - portName: service - - ## If true, create a serviceMonitor for grafana - ## - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - selfMonitor: true - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping the kube api server -## -kubeApiServer: - enabled: true - tlsConfig: - serverName: kubernetes - insecureSkipVerify: false - - ## If your API endpoint address is not reachable (as in AKS) you can replace it with the kubernetes service - ## - relabelings: [] - # - sourceLabels: - # - __meta_kubernetes_namespace - # - __meta_kubernetes_service_name - # - __meta_kubernetes_endpoint_port_name - # action: keep - # regex: default;kubernetes;https - # - targetLabel: __address__ - # replacement: kubernetes.default.svc:443 - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - jobLabel: component - selector: - matchLabels: - component: apiserver - provider: kubernetes - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - -## Component scraping the kubelet and kubelet-hosted cAdvisor -## -kubelet: - enabled: true - namespace: kube-system - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## Enable scraping the kubelet over https. 
For requirements to enable this see - ## https://github.com/coreos/prometheus-operator/issues/926 - ## - https: true - - ## Enable scraping /metrics/cadvisor from kubelet's service - ## - cAdvisor: true - - ## Metric relabellings to apply to samples before ingestion - ## - cAdvisorMetricRelabelings: [] - # - sourceLabels: [__name__, image] - # separator: ; - # regex: container_([a-z_]+); - # replacement: $1 - # action: drop - # - sourceLabels: [__name__] - # separator: ; - # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) - # replacement: $1 - # action: drop - - # relabel configs to apply to samples before ingestion. - # metrics_path is required to match upstream rules and charts - ## - cAdvisorRelabelings: - - sourceLabels: [__metrics_path__] - targetLabel: metrics_path - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - metricRelabelings: [] - # - sourceLabels: [__name__, image] - # separator: ; - # regex: container_([a-z_]+); - # replacement: $1 - # action: drop - # - sourceLabels: [__name__] - # separator: ; - # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) - # replacement: $1 - # action: drop - - # relabel configs to apply to samples before ingestion. - # metrics_path is required to match upstream rules and charts - ## - relabelings: - - sourceLabels: [__metrics_path__] - targetLabel: metrics_path - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping the kube controller manager -## -kubeControllerManager: - enabled: true - - ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## If using kubeControllerManager.endpoints only the port and targetPort are used - ## - service: - port: 10252 - targetPort: 10252 - # selector: - # component: kube-controller-manager - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## Enable scraping kube-controller-manager over https. - ## Requires proper certs (not self-signed) and delegated authentication/authorization checks - ## - https: false - - # Skip TLS certificate validation when scraping - insecureSkipVerify: null - - # Name of the server to use when validating TLS certificate - serverName: null - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping coreDns. Use either this or kubeDns -## -coreDns: - enabled: true - service: - port: 9153 - targetPort: 9153 - # selector: - # k8s-app: kube-dns - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## metric relabel configs to apply to samples before ingestion. 
- ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping kubeDns. Use either this or coreDns -## -kubeDns: - enabled: false - service: - dnsmasq: - port: 10054 - targetPort: 10054 - skydns: - port: 10055 - targetPort: 10055 - # selector: - # k8s-app: kube-dns - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - dnsmasqMetricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - dnsmasqRelabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping etcd -## -kubeEtcd: - enabled: true - - ## If your etcd is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used - ## - service: - port: 2379 - targetPort: 2379 - # selector: - # component: etcd - - ## Configure secure access to the etcd cluster by loading a secret into prometheus and - ## specifying security configuration below. For example, with a secret named etcd-client-cert - ## - ## serviceMonitor: - ## scheme: https - ## insecureSkipVerify: false - ## serverName: localhost - ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca - ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client - ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key - ## - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - scheme: http - insecureSkipVerify: false - serverName: "" - caFile: "" - certFile: "" - keyFile: "" - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. 
- ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - -## Component scraping kube scheduler -## -kubeScheduler: - enabled: true - - ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## If using kubeScheduler.endpoints only the port and targetPort are used - ## - service: - port: 10251 - targetPort: 10251 - # selector: - # component: kube-scheduler - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - ## Enable scraping kube-scheduler over https. - ## Requires proper certs (not self-signed) and delegated authentication/authorization checks - ## - https: false - - ## Skip TLS certificate validation when scraping - insecureSkipVerify: null - - ## Name of the server to use when validating TLS certificate - serverName: null - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - -## Component scraping kube proxy -## -kubeProxy: - enabled: true - - ## If your kube proxy is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - service: - port: 10249 - targetPort: 10249 - # selector: - # k8s-app: kube-proxy - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## Enable scraping kube-proxy over https. - ## Requires proper certs (not self-signed) and delegated authentication/authorization checks - ## - https: false - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - -## Component scraping kube state metrics -## -kubeStateMetrics: - enabled: true - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Configuration for kube-state-metrics subchart -## -kube-state-metrics: - rbac: - create: true - podSecurityPolicy: - enabled: true - -## Deploy node exporter as a daemonset to all nodes -## -nodeExporter: - enabled: true - - ## Use the value configured in prometheus-node-exporter.podLabels - ## - jobLabel: jobLabel - - serviceMonitor: - ## Scrape interval. 
If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## How long until a scrape request times out. If not set, the Prometheus default scape timeout is used. - ## - scrapeTimeout: "" - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - sourceLabels: [__name__] - # separator: ; - # regex: ^node_mountstats_nfs_(event|operations|transport)_.+ - # replacement: $1 - # action: drop - - ## relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Configuration for prometheus-node-exporter subchart -## -prometheus-node-exporter: - podLabels: - ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards - ## - jobLabel: node-exporter - extraArgs: - - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/) - - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$ - -## Manages Prometheus and Alertmanager components -## -prometheusOperator: - enabled: true - - # If true prometheus operator will create and update its CRDs on startup - manageCrds: true - - tlsProxy: - enabled: false - image: - repository: squareup/ghostunnel - tag: v1.5.2 - pullPolicy: IfNotPresent - resources: {} - - ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted - ## rules from making their way into prometheus and potentially preventing the container from starting - admissionWebhooks: - failurePolicy: Fail - enabled: false - ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data. - ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own - ## certs ahead of time if you wish. - ## - patch: - enabled: false - image: - repository: jettech/kube-webhook-certgen - tag: v1.0.0 - pullPolicy: IfNotPresent - resources: {} - ## Provide a priority class name to the webhook patching job - ## - priorityClassName: "" - podAnnotations: {} - nodeSelector: {} - affinity: {} - tolerations: [] - - ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list). - ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration - ## - namespaces: {} - # releaseNamespace: true - # additional: - # - kube-system - - ## Namespaces not to scope the interaction of the Prometheus Operator (deny list). - ## - denyNamespaces: [] - - ## Service account for Alertmanager to use. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - create: true - name: "" - - ## Configuration for Prometheus operator service - ## - service: - annotations: {} - labels: {} - clusterIP: "" - - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 30080 - - nodePortTls: 30443 - - ## Additional ports to open for Prometheus service - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services - ## - additionalPorts: [] - - ## Loadbalancer IP - ## Only use if service.type is "loadbalancer" - ## - loadBalancerIP: "" - loadBalancerSourceRanges: [] - - ## Service type - ## NodePort, ClusterIP, loadbalancer - ## - type: ClusterIP - - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - ## Deploy CRDs used by Prometheus Operator. - ## - createCustomResource: true - - ## Attempt to clean up CRDs created by Prometheus Operator. - ## - cleanupCustomResource: false - - ## Labels to add to the operator pod - ## - podLabels: {} - - ## Annotations to add to the operator pod - ## - podAnnotations: {} - - ## Assign a PriorityClassName to pods if set - # priorityClassName: "" - - ## Define Log Format - # Use logfmt (default) or json-formatted logging - # logFormat: logfmt - - ## Decrease log verbosity to errors only - # logLevel: error - - ## If true, the operator will create and maintain a service for scraping kubelets - ## ref: https://github.com/coreos/prometheus-operator/blob/master/helm/prometheus-operator/README.md - ## - kubeletService: - enabled: true - namespace: kube-system - - ## Create a servicemonitor for the operator - ## - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - selfMonitor: true - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Resource limits & requests - ## - resources: {} - # limits: - # cpu: 200m - # memory: 200Mi - # requests: - # cpu: 100m - # memory: 100Mi - - ## Define which Nodes the Pods are scheduled on. 
-  ## Define which Nodes the Pods are scheduled on.
-  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
-  ##
-  nodeSelector: {}
-
-  ## Tolerations for use with node taints
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  # - key: "key"
-  #   operator: "Equal"
-  #   value: "value"
-  #   effect: "NoSchedule"
-
-  ## Assign custom affinity rules to the prometheus operator
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-  ##
-  affinity: {}
-  # nodeAffinity:
-  #   requiredDuringSchedulingIgnoredDuringExecution:
-  #     nodeSelectorTerms:
-  #     - matchExpressions:
-  #       - key: kubernetes.io/e2e-az-name
-  #         operator: In
-  #         values:
-  #         - e2e-az1
-  #         - e2e-az2
-
-  securityContext:
-    runAsNonRoot: true
-    runAsUser: 65534
-
-  ## Prometheus-operator image
-  ##
-  image:
-    repository: quay.io/coreos/prometheus-operator
-    tag: v0.37.0
-    pullPolicy: IfNotPresent
-
-  ## Configmap-reload image to use for reloading configmaps
-  ##
-  configmapReloadImage:
-    repository: quay.io/coreos/configmap-reload
-    tag: v0.0.1
-
-  ## Prometheus-config-reloader image to use for config and rule reloading
-  ##
-  prometheusConfigReloaderImage:
-    repository: quay.io/coreos/prometheus-config-reloader
-    tag: v0.37.0
-
-  ## Set the prometheus config reloader side-car CPU limit
-  ##
-  configReloaderCpu: 100m
-
-  ## Set the prometheus config reloader side-car memory limit
-  ##
-  configReloaderMemory: 25Mi
-
-  ## Hyperkube image to use when cleaning up
-  ##
-  hyperkubeImage:
-    repository: k8s.gcr.io/hyperkube
-    tag: v1.12.1
-    pullPolicy: IfNotPresent
-
-## Deploy a Prometheus instance
-##
-prometheus:
-
-  enabled: true
-
-  ## Annotations for Prometheus
-  ##
-  annotations: {}
-
-  ## Service account for Prometheuses to use.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-  ##
-  serviceAccount:
-    create: true
-    name: ""
-
-  ## Configuration for Prometheus service
-  ##
-  service:
-    annotations: {}
-    labels: {}
-    clusterIP: ""
-
-    ## Port for Prometheus Service to listen on
-    ##
-    port: 9090
-
-    ## To be used with a proxy extraContainer port
-    targetPort: 9090
-
-    ## List of IP addresses at which the Prometheus server service is available
-    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
-    ##
-    externalIPs: []
-
-    ## Port to expose on each node
-    ## Only used if service.type is 'NodePort'
-    ##
-    nodePort: 30090
-
-    ## Loadbalancer IP
-    ## Only used if service.type is "LoadBalancer"
-    loadBalancerIP: ""
-    loadBalancerSourceRanges: []
-    ## Service type
-    ##
-    type: ClusterIP
-
-    sessionAffinity: ""
-
-  ## Configuration for creating a separate Service for each statefulset Prometheus replica
-  ##
-  servicePerReplica:
-    enabled: false
-    annotations: {}
-
-    ## Port for Prometheus Service per replica to listen on
-    ##
-    port: 9090
-
-    ## To be used with a proxy extraContainer port
-    targetPort: 9090
-
-    ## Port to expose on each node
-    ## Only used if servicePerReplica.type is 'NodePort'
-    ##
-    nodePort: 30091
-
-    ## Loadbalancer source IP ranges
-    ## Only used if servicePerReplica.type is "LoadBalancer"
-    loadBalancerSourceRanges: []
-    ## Service type
-    ##
-    type: ClusterIP
-
-  ## Configure pod disruption budgets for Prometheus
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
-  ## This configuration is immutable once created and will require the PDB to be deleted to be changed
-  ## https://github.com/kubernetes/kubernetes/issues/45398
-  ##
-  podDisruptionBudget:
-    enabled: false
-    minAvailable: 1
-    maxUnavailable: ""
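-  ## For example (illustrative), when running more than one replica you might keep at least
-  ## one pod available during voluntary disruptions:
-  # podDisruptionBudget:
-  #   enabled: true
-  #   minAvailable: 1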
-  ingress:
-    enabled: false
-    annotations: {}
-    labels: {}
-
-    ## Hostnames.
-    ## Must be provided if Ingress is enabled.
-    ##
-    # hosts:
-    #   - prometheus.domain.com
-    hosts: []
-
-    ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
-    ##
-    paths: []
-    # - /
-
-    ## TLS configuration for Prometheus Ingress
-    ## Secret must be manually created in the namespace
-    ##
-    tls: []
-    # - secretName: prometheus-general-tls
-    #   hosts:
-    #     - prometheus.example.com
-
-  ## Configuration for creating an Ingress that will map to each Prometheus replica service
-  ## prometheus.servicePerReplica must be enabled
-  ##
-  ingressPerReplica:
-    enabled: false
-    annotations: {}
-    labels: {}
-
-    ## Final form of the hostname for each per replica ingress is
-    ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
-    ##
-    ## Prefix for the per replica ingress that will have `-$replicaNumber`
-    ## appended to the end
-    hostPrefix: ""
-    ## Domain that will be used for the per replica ingress
-    hostDomain: ""
-
-    ## Paths to use for ingress rules
-    ##
-    paths: []
-    # - /
-
-    ## Secret name containing the TLS certificate for Prometheus per replica ingress
-    ## Secret must be manually created in the namespace
-    tlsSecretName: ""
-
-    ## Separate secret for each per replica Ingress. Can be used together with cert-manager
-    ##
-    tlsSecretPerReplica:
-      enabled: false
-      ## Final form of the secret for each per replica ingress is
-      ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
-      ##
-      prefix: "prometheus"
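-  ## As an illustrative sketch (example hostnames), with servicePerReplica enabled, hostPrefix
-  ## "prometheus" and hostDomain "example.com", replica 0 is served at prometheus-0.example.com:
-  # ingressPerReplica:
-  #   enabled: true
-  #   hostPrefix: "prometheus"
-  #   hostDomain: "example.com"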
-  ## Configure additional options for default pod security policy for Prometheus
-  ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
-  podSecurityPolicy:
-    allowedCapabilities: []
-
-  serviceMonitor:
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-    selfMonitor: true
-
-    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
-    scheme: ""
-
-    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
-    ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
-    tlsConfig: {}
-
-    bearerTokenFile:
-
-    ## metric relabel configs to apply to samples before ingestion.
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## relabel configs to apply to samples before ingestion.
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^(.*)$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-  ## Settings affecting prometheusSpec
-  ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
-  ##
-  prometheusSpec:
-    ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
-    ##
-    disableCompaction: false
-    ## APIServerConfig
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#apiserverconfig
-    ##
-    apiserverConfig: {}
-
-    ## Interval between consecutive scrapes.
-    ##
-    scrapeInterval: ""
-
-    ## Interval between consecutive evaluations.
-    ##
-    evaluationInterval: ""
-
-    ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
-    ##
-    listenLocal: false
-
-    ## EnableAdminAPI enables the Prometheus administrative HTTP API, which includes functionality such as deleting time series.
-    ## This is disabled by default.
-    ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
-    ##
-    enableAdminAPI: false
-
-    ## Image of Prometheus.
-    ##
-    image:
-      repository: quay.io/prometheus/prometheus
-      tag: v2.15.2
-
-    ## Tolerations for use with node taints
-    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-    ##
-    tolerations: []
-    # - key: "key"
-    #   operator: "Equal"
-    #   value: "value"
-    #   effect: "NoSchedule"
-
-    ## Alertmanagers to which alerts will be sent
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints
-    ##
-    ## Default configuration will connect to the alertmanager deployed as part of this release
-    ##
-    alertingEndpoints: []
-    # - name: ""
-    #   namespace: ""
-    #   port: http
-    #   scheme: http
-    #   pathPrefix: ""
-    #   tlsConfig: {}
-    #   bearerTokenFile: ""
-    #   apiVersion: v2
-
-    ## External labels to add to any time series or alerts when communicating with external systems
-    ##
-    externalLabels: {}
-
-    ## Name of the external label used to denote replica name
-    ##
-    replicaExternalLabelName: ""
-
-    ## If true, the Operator won't add the external label used to denote replica name
-    ##
-    replicaExternalLabelNameClear: false
-
-    ## Name of the external label used to denote Prometheus instance name
-    ##
-    prometheusExternalLabelName: ""
-
-    ## If true, the Operator won't add the external label used to denote Prometheus instance name
-    ##
-    prometheusExternalLabelNameClear: false
-
-    ## External URL at which Prometheus will be reachable.
-    ##
-    externalUrl: ""
-
-    ## Define which Nodes the Pods are scheduled on.
-    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
-    ##
-    nodeSelector: {}
-
-    ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
-    ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
-    ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
-    ## with the new list of secrets.
-    ##
-    secrets: []
-
-    ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
-    ## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
-    ##
-    configMaps: []
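-    ## For example (illustrative), mounting an etcd client certificate secret by name makes it
-    ## available under /etc/prometheus/secrets/etcd-client-cert/ for the kube-etcd scrape job
-    ## sketched under additionalScrapeConfigs below:
-    # secrets:
-    #   - etcd-client-cert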
-    ## QuerySpec defines the query command line flags when starting Prometheus.
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#queryspec
-    ##
-    query: {}
-
-    ## Namespaces to be selected for PrometheusRules discovery.
-    ## If nil, select own namespace.
-    ## See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
-    ##
-    ruleNamespaceSelector: {}
-
-    ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the PrometheusRule resources created
-    ##
-    ruleSelectorNilUsesHelmValues: true
-
-    ## PrometheusRules to be selected for target discovery.
-    ## If {}, select all PrometheusRules
-    ##
-    ruleSelector: {}
-    ## Example which selects all PrometheusRule resources
-    ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
-    # ruleSelector:
-    #   matchExpressions:
-    #     - key: prometheus
-    #       operator: In
-    #       values:
-    #         - example-rules
-    #         - example-rules-2
-    #
-    ## Example which selects all PrometheusRule resources with label "role" set to "example-rules"
-    # ruleSelector:
-    #   matchLabels:
-    #     role: example-rules
-
-    ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the servicemonitors created
-    ##
-    serviceMonitorSelectorNilUsesHelmValues: true
-
-    ## ServiceMonitors to be selected for target discovery.
-    ## If {}, select all ServiceMonitors
-    ##
-    serviceMonitorSelector: {}
-    ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
-    # serviceMonitorSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## Namespaces to be selected for ServiceMonitor discovery.
-    ## See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
-    ##
-    serviceMonitorNamespaceSelector: {}
-
-    ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the podmonitors created
-    ##
-    podMonitorSelectorNilUsesHelmValues: true
-
-    ## PodMonitors to be selected for target discovery.
-    ## If {}, select all PodMonitors
-    ##
-    podMonitorSelector: {}
-    ## Example which selects PodMonitors with label "prometheus" set to "somelabel"
-    # podMonitorSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## Namespaces to be selected for PodMonitor discovery.
-    ## See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
-    ##
-    podMonitorNamespaceSelector: {}
-
-    ## How long to retain metrics
-    ##
-    retention: 10d
-
-    ## Maximum size of metrics
-    ##
-    retentionSize: ""
-
-    ## Enable compression of the write-ahead log using Snappy.
-    ##
-    walCompression: false
-
-    ## If true, the Operator won't process any Prometheus configuration changes
-    ##
-    paused: false
-
-    ## Number of Prometheus replicas desired
-    ##
-    replicas: 1
-
-    ## Log level to configure Prometheus with
-    ##
-    logLevel: info
-
-    ## Log format to configure Prometheus with
-    ##
-    logFormat: logfmt
-
-    ## Prefix used to register routes, overriding externalUrl route.
-    ## Useful for proxies that rewrite URLs.
-    ##
-    routePrefix: /
-
-    ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
-    ## Metadata Labels and Annotations get propagated to the prometheus pods.
-    ##
-    podMetadata: {}
-    # labels:
-    #   app: prometheus
-    #   k8s-app: prometheus
-
-    ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
-    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
-    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
-    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
-    ##
-    podAntiAffinity: ""
-
-    ## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity.
-    ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
-    ##
-    podAntiAffinityTopologyKey: kubernetes.io/hostname
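-    ## For example (illustrative), to require that two replicas never share a zone:
-    # replicas: 2
-    # podAntiAffinity: "hard"
-    # podAntiAffinityTopologyKey: failure-domain.beta.kubernetes.io/zone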
-    ## Assign custom affinity rules to the prometheus instance
-    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-    ##
-    affinity: {}
-    # nodeAffinity:
-    #   requiredDuringSchedulingIgnoredDuringExecution:
-    #     nodeSelectorTerms:
-    #     - matchExpressions:
-    #       - key: kubernetes.io/e2e-az-name
-    #         operator: In
-    #         values:
-    #         - e2e-az1
-    #         - e2e-az2
-
-    ## The remote_read spec configuration for Prometheus.
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotereadspec
-    remoteRead: []
-    # - url: http://remote1/read
-
-    ## The remote_write spec configuration for Prometheus.
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
-    remoteWrite: []
-    # remoteWrite:
-    #   - url: http://thanos-receive:19291/api/v1/receive
-
-    ## Enable/disable provisioning of Grafana dashboards for the Prometheus remote write feature
-    remoteWriteDashboards: false
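-    ## As an illustrative sketch (the endpoint is an example), a RemoteWriteSpec entry can carry
-    ## writeRelabelConfigs to forward only selected series:
-    # remoteWrite:
-    #   - url: http://remote-storage.example.com/api/v1/write
-    #     writeRelabelConfigs:
-    #       - sourceLabels: [__name__]
-    #         regex: node_.+
-    #         action: keep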
-    ## Resource limits & requests
-    ##
-    resources: {}
-    # requests:
-    #   memory: 400Mi
-
-    ## Prometheus StorageSpec for persistent data
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md
-    ##
-    storageSpec: {}
-    # volumeClaimTemplate:
-    #   spec:
-    #     storageClassName: gluster
-    #     accessModes: ["ReadWriteOnce"]
-    #     resources:
-    #       requests:
-    #         storage: 50Gi
-    #   selector: {}
-
-    ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
-    ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
-    ## as specified in the official Prometheus documentation:
-    ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
-    ## appended, the user is responsible for ensuring they are valid. Note that this feature may break upgrades of
-    ## Prometheus: review the Prometheus release notes to ensure that no incompatible scrape configs will break
-    ## Prometheus after the upgrade.
-    ##
-    ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
-    ## port to 2379 and allow etcd scraping, provided it is running on all Kubernetes master nodes
-    ##
-    additionalScrapeConfigs: []
-    # - job_name: kube-etcd
-    #   kubernetes_sd_configs:
-    #     - role: node
-    #   scheme: https
-    #   tls_config:
-    #     ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
-    #     cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
-    #     key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
-    #   relabel_configs:
-    #     - action: labelmap
-    #       regex: __meta_kubernetes_node_label_(.+)
-    #     - source_labels: [__address__]
-    #       action: replace
-    #       target_label: __address__
-    #       regex: ([^:;]+):(\d+)
-    #       replacement: ${1}:2379
-    #     - source_labels: [__meta_kubernetes_node_name]
-    #       action: keep
-    #       regex: .*mst.*
-    #     - source_labels: [__meta_kubernetes_node_name]
-    #       action: replace
-    #       target_label: node
-    #       regex: (.*)
-    #       replacement: ${1}
-    #   metric_relabel_configs:
-    #     - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
-    #       action: labeldrop
-
-    ## additionalPrometheusSecretsAnnotations allows adding annotations to the kubernetes secret. This can be useful
-    ## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false'
-    additionalPrometheusSecretsAnnotations: {}
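-    ## For example, to disable Spinnaker's versioning of the generated secret as mentioned above:
-    # additionalPrometheusSecretsAnnotations:
-    #   strategy.spinnaker.io/versioned: 'false'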
-    ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
-    ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#.
-    ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
-    ## As AlertManager configs are appended, the user is responsible for ensuring they are valid. Note that using this
-    ## feature may break upgrades of Prometheus: review the Prometheus release notes to ensure that no incompatible
-    ## AlertManager configs will break Prometheus after the upgrade.
-    ##
-    additionalAlertManagerConfigs: []
-    # - consul_sd_configs:
-    #   - server: consul.dev.test:8500
-    #     scheme: http
-    #     datacenter: dev
-    #     tag_separator: ','
-    #     services:
-    #       - metrics-prometheus-alertmanager
-
-    ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
-    ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
-    ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
-    ## As alert relabel configs are appended, the user is responsible for ensuring they are valid. Note that using this
-    ## feature may break upgrades of Prometheus: review the Prometheus release notes to ensure that no incompatible
-    ## alert relabel configs will break Prometheus after the upgrade.
-    ##
-    additionalAlertRelabelConfigs: []
-    # - separator: ;
-    #   regex: prometheus_replica
-    #   replacement: $1
-    #   action: labeldrop
-
-    ## SecurityContext holds pod-level security attributes and common container settings.
-    ## This defaults to a non-root user with uid 1000 and gid 2000.
-    ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md
-    ##
-    securityContext:
-      runAsNonRoot: true
-      runAsUser: 1000
-      fsGroup: 2000
-
-    ## Priority class assigned to the Pods
-    ##
-    priorityClassName: ""
-
-    ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
-    ## This section is experimental; it may change significantly without deprecation notice in any release
-    ## and without backward compatibility.
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#thanosspec
-    ##
-    # thanos:
-    #   baseImage: thanosio/thanos
-    #   version: v0.16.0
-    #   objectStorageConfig:
-    #     key: thanos.yaml
-    #     name: thanos-objstore-config
-
-    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
-    ## If using a proxy extraContainer, update targetPort with the proxy container port
-    containers: []
-
-    ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
-    ## (permissions, dir tree) on mounted volumes before starting Prometheus
-    initContainers: []
-
-    ## Enable additional scrape configs that are managed externally to this chart. Note that Prometheus
-    ## will fail to provision if the correct secret does not exist.
-    ## This option requires that you are maintaining a secret in the same namespace as Prometheus with
-    ## a name of 'prometheus-operator-prometheus-scrape-confg' and a key of 'additional-scrape-configs.yaml' that
-    ## contains a list of scrape_configs. The name of the secret may vary if you utilize the "fullnameOverride".
-    ## This feature cannot be used in conjunction with the additionalScrapeConfigs attribute (the helm-generated
-    ## secret will overwrite your self-maintained secret).
-    ##
-    ## scrape_config docs: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
-    ## explanation of "confg" typo: https://github.com/helm/charts/issues/13368
-    additionalScrapeConfigsExternal: false
-
-    ## PortName to use for Prometheus.
-    ##
-    portName: "web"
-
-  additionalServiceMonitors: []
-    ## Name of the ServiceMonitor to create
-    ##
-    # - name: ""
-
-      ## Additional labels to set, used for the ServiceMonitorSelector together with standard labels from
-      ## the chart
-      ##
-      # additionalLabels: {}
-
-      ## Service label for use in assembling a job name of the form