From c5d7d0a7c4896534b2f94fa9c160a6b26e68c724 Mon Sep 17 00:00:00 2001 From: dengyunlong <375331854@qq.com> Date: Thu, 8 Dec 2022 16:22:17 +0800 Subject: [PATCH 1/9] fix: oidc auto create --- manifests/ansible/kubespray | 2 +- manifests/ansible/kubespray_2_17_0 | 2 +- .../charts/grafana/templates/_pod.tpl | 2 +- .../monitor-stack/charts/grafana/values.yaml | 33 +++++++++---------- manifests/helm/monitor-stack/values.yaml | 2 +- pkg/kubeserver/models/component_base.go | 4 +++ pkg/kubeserver/models/components_monitor.go | 5 +-- .../templates/components/monitor_promtheus.go | 2 ++ 8 files changed, 29 insertions(+), 23 deletions(-) diff --git a/manifests/ansible/kubespray b/manifests/ansible/kubespray index 514d19d1d..6cc0e8441 160000 --- a/manifests/ansible/kubespray +++ b/manifests/ansible/kubespray @@ -1 +1 @@ -Subproject commit 514d19d1d09dff13ce1066b626a1ea47bceb898d +Subproject commit 6cc0e844163718afbf543286624bb7ef35a40209 diff --git a/manifests/ansible/kubespray_2_17_0 b/manifests/ansible/kubespray_2_17_0 index 54bdfabff..49c1f245a 160000 --- a/manifests/ansible/kubespray_2_17_0 +++ b/manifests/ansible/kubespray_2_17_0 @@ -1 +1 @@ -Subproject commit 54bdfabff4a53c5383aba944c4866a008b1eb4f1 +Subproject commit 49c1f245a27c623be5ab9835de950e320ff7af51 diff --git a/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl b/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl index d27abe983..9502de832 100755 --- a/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl +++ b/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl @@ -218,7 +218,7 @@ containers: containerPort: {{ .Values.service.port }} protocol: TCP - name: {{ .Values.podPortName }} - containerPort: 3000 + containerPort: 30000 protocol: TCP env: {{- if not .Values.env.GF_SECURITY_ADMIN_USER }} diff --git a/manifests/helm/monitor-stack/charts/grafana/values.yaml b/manifests/helm/monitor-stack/charts/grafana/values.yaml index 39d0bbb1e..c2080d797 100755 --- a/manifests/helm/monitor-stack/charts/grafana/values.yaml +++ b/manifests/helm/monitor-stack/charts/grafana/values.yaml @@ -33,12 +33,12 @@ deploymentStrategy: readinessProbe: httpGet: path: /api/health - port: 3000 + port: 30000 livenessProbe: httpGet: path: /api/health - port: 3000 + port: 30000 initialDelaySeconds: 60 timeoutSeconds: 30 failureThreshold: 10 @@ -113,9 +113,7 @@ podPortName: grafana ## ref: http://kubernetes.io/docs/user-guide/services/ ## service: - type: ClusterIP - port: 80 - targetPort: 3000 + port: 30000 # targetPort: 4181 To be used with a proxy extraContainer annotations: {} labels: {} @@ -382,20 +380,20 @@ dashboardsConfigMaps: {} ## ref: http://docs.grafana.org/installation/configuration/ ## grafana.ini: - paths: - data: /var/lib/grafana/data - logs: /var/log/grafana - plugins: /var/lib/grafana/plugins - provisioning: /etc/grafana/provisioning - analytics: - check_for_updates: false +# paths: +# data: /var/lib/grafana/data +# logs: /var/log/grafana +# plugins: /var/lib/grafana/plugins +# provisioning: /etc/grafana/provisioning +# analytics: +# check_for_updates: false log: mode: console - grafana_net: - url: https://grafana.net - auth.anonymous: - enabled: true - org_role: Viewer +# grafana_net: +# url: https://grafana.net +# auth.anonymous: +# enabled: true +# org_role: Viewer ## grafana Authentication can be enabled with the following values on grafana.ini # server: # The full public facing url you use in browser, used for redirects and emails @@ -507,3 +505,4 @@ sidecar: ## Override the deployment namespace ## 
namespaceOverride: "" +>>>>>>> d5a85d85 (fix: oidc auto create) diff --git a/manifests/helm/monitor-stack/values.yaml b/manifests/helm/monitor-stack/values.yaml index 79cc4af17..7939556b0 100644 --- a/manifests/helm/monitor-stack/values.yaml +++ b/manifests/helm/monitor-stack/values.yaml @@ -2001,4 +2001,4 @@ prometheus: loki: enabled: true promtail: - enabled: true + enabled: true \ No newline at end of file diff --git a/pkg/kubeserver/models/component_base.go b/pkg/kubeserver/models/component_base.go index 8118412fa..ad99dec00 100644 --- a/pkg/kubeserver/models/component_base.go +++ b/pkg/kubeserver/models/component_base.go @@ -5,6 +5,9 @@ import ( "fmt" "strings" + "yunion.io/x/jsonutils" + "yunion.io/x/log" + "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/chart/loader" "helm.sh/helm/v3/pkg/release" @@ -146,6 +149,7 @@ func (m HelmComponentManager) CreateHelmResource( if err := m.EnsureNamespace(cluster, m.namespace); err != nil { return errors.Wrapf(err, "%s ensure namespace %q", m.releaseName, m.namespace) } + log.Errorf("**** helm install vals: %s", jsonutils.Marshal(vals).PrettyString()) if _, err := m.HelmInstall(cluster, m.namespace, m.embedChartName, m.releaseName, vals); err != nil { return errors.Wrapf(err, "create helm %s release", m.releaseName) } diff --git a/pkg/kubeserver/models/components_monitor.go b/pkg/kubeserver/models/components_monitor.go index bf65b7f21..b66515126 100644 --- a/pkg/kubeserver/models/components_monitor.go +++ b/pkg/kubeserver/models/components_monitor.go @@ -29,7 +29,8 @@ var ( ) const ( - MonitorNamespace = "onecloud-monitoring" + //MonitorNamespace = "onecloud-monitoring" + MonitorNamespace = "kube-monitoring" MonitorReleaseName = "monitor" ThanosObjectStoreConfigSecretName = "thanos-objstore-config" ThanosObjectStoreConfigSecretKey = "thanos.yaml" @@ -40,7 +41,7 @@ const ( func init() { MonitorComponentManager = NewMonitorComponentManager() - ComponentManager.RegisterDriver(newComponentDriverMonitor()) + //ComponentManager.RegisterDriver(newComponentDriverMonitor()) } type SMonitorComponentManager struct { diff --git a/pkg/kubeserver/templates/components/monitor_promtheus.go b/pkg/kubeserver/templates/components/monitor_promtheus.go index 81a1a9ccf..b01757373 100644 --- a/pkg/kubeserver/templates/components/monitor_promtheus.go +++ b/pkg/kubeserver/templates/components/monitor_promtheus.go @@ -198,6 +198,8 @@ type GrafanaIniServer struct { ServeFromSubPath bool `json:"serve_from_sub_path"` Domain string `json:"domain,omitempty"` EnforceDomain bool `json:"enforce_domain,omitempty"` + HttpPort string `json:"http_port"` + Protocol string `json:"protocol"` } type GrafanaIniOAuth struct { From de0d582c212349703cecacf77b4458e2e39d137e Mon Sep 17 00:00:00 2001 From: dengyunlong <375331854@qq.com> Date: Mon, 12 Dec 2022 10:43:44 +0800 Subject: [PATCH 2/9] create oidc secret --- .../templates/components/monitor_promtheus.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pkg/kubeserver/templates/components/monitor_promtheus.go b/pkg/kubeserver/templates/components/monitor_promtheus.go index b01757373..c5102b456 100644 --- a/pkg/kubeserver/templates/components/monitor_promtheus.go +++ b/pkg/kubeserver/templates/components/monitor_promtheus.go @@ -223,10 +223,27 @@ type GrafanaIniDatabase struct { Password string `json:"password"` } +type GrafanaIniUsers struct { + DefaultTheme string `json:"default_theme"` +} + +type GrafanaIniSecurity struct { + CookieSamesite string `json:"cookie_samesite"` + CookieSecure bool `json:"cookie_secure"` 
+ AllowEmbedding bool `json:"allow_embedding"` +} + +type GrafanaIniAuth struct { + LoginCookieName string `json:"login_cookie_name"` +} + type GrafanaIni struct { Server *GrafanaIniServer `json:"server"` OAuth *GrafanaIniOAuth `json:"auth.generic_oauth"` Database *GrafanaIniDatabase `json:"database"` + Users *GrafanaIniUsers `json:"users"` + Security *GrafanaIniSecurity `json:"security"` + Auth *GrafanaIniAuth `json:"auth"` } type GrafanaDataSourceJsonData struct { From 10328ef5ab9c813cebb1ed0ffcd28cb5d4136812 Mon Sep 17 00:00:00 2001 From: dengyunlong <375331854@qq.com> Date: Wed, 14 Dec 2022 09:10:15 +0800 Subject: [PATCH 3/9] fix:oidc duplicate create --- pkg/kubeserver/api/component.go | 4 +- pkg/kubeserver/models/components.go | 9 +- pkg/kubeserver/models/components_monitor.go | 152 +++++++++++++----- pkg/kubeserver/tasks/component_deploy_task.go | 17 +- .../templates/components/monitor_promtheus.go | 26 +-- 5 files changed, 148 insertions(+), 60 deletions(-) diff --git a/pkg/kubeserver/api/component.go b/pkg/kubeserver/api/component.go index 8536cbf48..692fdde3e 100644 --- a/pkg/kubeserver/api/component.go +++ b/pkg/kubeserver/api/component.go @@ -1,6 +1,8 @@ package api -import "yunion.io/x/onecloud/pkg/apis" +import ( + "yunion.io/x/onecloud/pkg/apis" +) const ( ClusterComponentCephCSI = "cephCSI" diff --git a/pkg/kubeserver/models/components.go b/pkg/kubeserver/models/components.go index b36ab83e8..8228253e2 100644 --- a/pkg/kubeserver/models/components.go +++ b/pkg/kubeserver/models/components.go @@ -5,12 +5,12 @@ import ( "crypto/md5" "encoding/hex" "fmt" - "reflect" - "gopkg.in/yaml.v2" "k8s.io/apimachinery/pkg/api/resource" - + "reflect" "yunion.io/x/jsonutils" + "yunion.io/x/kubecomps/pkg/kubeserver/api" + "yunion.io/x/kubecomps/pkg/kubeserver/drivers" "yunion.io/x/log" "yunion.io/x/onecloud/pkg/cloudcommon/db" "yunion.io/x/onecloud/pkg/cloudcommon/db/lockman" @@ -21,9 +21,6 @@ import ( "yunion.io/x/pkg/tristate" "yunion.io/x/pkg/utils" "yunion.io/x/sqlchemy" - - "yunion.io/x/kubecomps/pkg/kubeserver/api" - "yunion.io/x/kubecomps/pkg/kubeserver/drivers" ) var ( diff --git a/pkg/kubeserver/models/components_monitor.go b/pkg/kubeserver/models/components_monitor.go index b66515126..4774e88ad 100644 --- a/pkg/kubeserver/models/components_monitor.go +++ b/pkg/kubeserver/models/components_monitor.go @@ -2,7 +2,12 @@ package models import ( "context" + "encoding/base64" "fmt" + "time" + "yunion.io/x/onecloud/pkg/mcclient/modules" + "yunion.io/x/onecloud/pkg/mcclient/modules/identity" + "yunion.io/x/pkg/util/seclib" "github.com/minio/minio-go/v7" "k8s.io/api/core/v1" @@ -41,7 +46,7 @@ const ( func init() { MonitorComponentManager = NewMonitorComponentManager() - //ComponentManager.RegisterDriver(newComponentDriverMonitor()) + ComponentManager.RegisterDriver(newComponentDriverMonitor()) } type SMonitorComponentManager struct { @@ -394,6 +399,59 @@ func (c componentDriverMonitor) FetchStatus(cluster *SCluster, comp *SComponent, return nil } +func (m SMonitorComponentManager) CreateOIDCSecret(cluster *SCluster, uid string, pid string) (*identity.SOpenIDConnectCredential, error) { + grafanaHost, err := m.GetGrafanaHost(cluster) + if err != nil { + return nil, err + } + serverDomain := options.Options.ApiServer + redirectUrl := fmt.Sprintf("%s/grafana-proxy/%s/login/generic_oauth", serverDomain, grafanaHost) + s, err := GetClusterManager().GetSession() + if err != nil { + return nil, err + } + Credentials := &identity.SCredentialManager{ + ResourceManager: 
modules.NewIdentityV3Manager("credential", "credentials", + []string{}, + []string{"ID", "Type", "user_id", "project_id", "blob"}), + } + oidcCred := &identity.SOpenIDConnectCredential{} + oidcCred.Secret = base64.URLEncoding.EncodeToString([]byte(seclib.RandomPassword(32))) + oidcCred.RedirectUri = redirectUrl + blobJson := jsonutils.Marshal(&oidcCred) + params := jsonutils.NewDict() + name := fmt.Sprintf("oidc-%s-%s-%d", uid, pid, time.Now().Unix()) + if len(pid) > 0 { + params.Add(jsonutils.NewString(pid), "project_id") + } + params.Add(jsonutils.NewString(identity.OIDC_CREDENTIAL_TYPE), "type") + if len(uid) > 0 { + params.Add(jsonutils.NewString(uid), "user_id") + } + params.Add(jsonutils.NewString(blobJson.String()), "blob") + params.Add(jsonutils.NewString(name), "name") + result, err := Credentials.Create(s, params) + if err != nil { + return oidcCred, err + } + oidcCred.ClientId, err = result.GetString("id") + return oidcCred, err +} + +func (m SMonitorComponentManager) GetGrafanaHost(cluster *SCluster) (grafanaHost string, err error) { + grafanaEip, err := cluster.GetAPIServerPublicEndpoint() + if err != nil { + fmt.Println("k8s cluster no eip", err) + return "", err + } + grafanaHost = fmt.Sprintf("%s:%s", grafanaEip, m.GetGrafanaPort()) + return +} + +func (m SMonitorComponentManager) GetGrafanaPort() string { + return "30000" +} + func (m SMonitorComponentManager) GetHelmValues(cluster *SCluster, setting *api.ComponentSettings) (map[string]interface{}, error) { imgRepo, err := cluster.GetImageRepository() if err != nil { @@ -413,48 +471,60 @@ func (m SMonitorComponentManager) GetHelmValues(cluster *SCluster, setting *api. Tag: tag, } } - grafanaHost := input.Grafana.Host - if grafanaHost == "" { - grafanaHost = input.Grafana.PublicAddress + grafanaMi := func(name, tag string) components.Image { + return components.Image{ + Repository: fmt.Sprintf("%s/%s", "hb.grgbanking.com/open/grafana", name), + Tag: tag, + } } - - grafanaProto := "https" - rootUrl := fmt.Sprintf("%s://%s", grafanaProto, grafanaHost) - serveSubPath := false - grafanaIni := &components.GrafanaIni{ - Server: &components.GrafanaIniServer{}, + serverDomain := options.Options.ApiServer + grafanaEip, err := cluster.GetAPIServerPublicEndpoint() + if err != nil { + return nil, err } - if !input.Grafana.DisableSubpath { - serveSubPath = true - subpath := input.Grafana.Subpath - if subpath == "" { - subpath = "grafana" - } - rootUrl = fmt.Sprintf("%s/%s/", rootUrl, subpath) + grafanaHost, err := m.GetGrafanaHost(cluster) + if err != nil { + return nil, err } - grafanaIni.Server.ServeFromSubPath = serveSubPath - grafanaIni.Server.RootUrl = rootUrl - if input.Grafana.EnforceDomain { - grafanaIni.Server.Domain = grafanaHost - grafanaIni.Server.EnforceDomain = true - } - - if input.Grafana.OAuth != nil { - oauth := input.Grafana.OAuth - grafanaIni.OAuth = &components.GrafanaIniOAuth{ - Enabled: oauth.Enabled, - ClientId: oauth.ClientId, - ClientSecret: oauth.ClientSecret, - Scopes: oauth.Scopes, - AuthURL: oauth.AuthURL, - TokenURL: oauth.TokenURL, - APIURL: oauth.APIURL, - AllowedDomains: oauth.AllowedDomains, - AllowSignUp: oauth.AllowSignUp, - RoleAttributePath: oauth.RoleAttributePath, - } + rootUrl := fmt.Sprintf("%s/grafana-proxy/%s", serverDomain, grafanaHost) + grafanaIni := &components.GrafanaIni{ + Server: &components.GrafanaIniServer{}, + OAuth: &components.GrafanaIniOAuth{}, + Users: &components.GrafanaIniUsers{}, + Security: &components.GrafanaIniSecurity{}, + Auth: &components.GrafanaIniAuth{}, } + 
grafanaIni.Auth.LoginCookieName = "grafana_session_721" + + grafanaIni.Server.ServeFromSubPath = true + grafanaIni.Server.RootUrl = rootUrl + grafanaIni.Server.EnforceDomain = true + grafanaIni.Server.Domain = grafanaEip + grafanaIni.Server.HttpPort = m.GetGrafanaPort() + grafanaIni.Server.Protocol = "http" + + grafanaIni.Security.CookieSecure = true + grafanaIni.Security.CookieSamesite = "none" + grafanaIni.Security.AllowEmbedding = true + + grafanaIni.Users.DefaultTheme = "light" + + if setting.Monitor.Grafana.OAuth != nil { + grafanaIni.OAuth.ClientId = setting.Monitor.Grafana.OAuth.ClientId + grafanaIni.OAuth.ClientSecret = setting.Monitor.Grafana.OAuth.ClientSecret + } + + grafanaIni.OAuth.Enabled = true + grafanaIni.OAuth.Scopes = "user profile" + grafanaIni.OAuth.IdTokenAttributeName = "data.id" + grafanaIni.OAuth.AuthURL = fmt.Sprintf("%s/api/v1/auth/oidc/auth", serverDomain) + grafanaIni.OAuth.TokenURL = fmt.Sprintf("%s/api/v1/auth/oidc/token", serverDomain) + grafanaIni.OAuth.APIURL = fmt.Sprintf("%s/api/v1/auth/oidc/user", serverDomain) + grafanaIni.OAuth.RoleAttributePath = "projectName == 'system' && contains(roles, 'admin') && 'Admin' || 'Editor'" + grafanaIni.OAuth.TlsSkipVerifyInsecure = true + grafanaIni.OAuth.AllowAssignGrafanaAdmin = true + grafanaIni.OAuth.AllowSignUp = true if input.Grafana.DB != nil { db := input.Grafana.DB if db.Host == "" { @@ -540,9 +610,11 @@ func (m SMonitorComponentManager) GetHelmValues(cluster *SCluster, setting *api. DefaultDatasourceEnabled: true, }, }, - Image: mi("grafana", "6.7.1"), + Image: grafanaMi("grafana", "7.2.1-zh"), + //Image: mi("grafana", "6.7.1"), Service: &components.Service{ - Type: string(v1.ServiceTypeClusterIP), + Type: string(v1.ServiceTypeNodePort), + NodePort: m.GetGrafanaPort(), }, Ingress: &components.GrafanaIngress{ Enabled: true, diff --git a/pkg/kubeserver/tasks/component_deploy_task.go b/pkg/kubeserver/tasks/component_deploy_task.go index 1097f0cd7..655d2e3f7 100644 --- a/pkg/kubeserver/tasks/component_deploy_task.go +++ b/pkg/kubeserver/tasks/component_deploy_task.go @@ -3,9 +3,8 @@ package tasks import ( "context" "fmt" - "yunion.io/x/kubecomps/pkg/kubeserver/models" - "yunion.io/x/jsonutils" + "yunion.io/x/kubecomps/pkg/kubeserver/models" "yunion.io/x/onecloud/pkg/cloudcommon/db" "yunion.io/x/onecloud/pkg/cloudcommon/db/taskman" @@ -37,6 +36,20 @@ func (t *ComponentDeployTask) OnInit(ctx context.Context, obj db.IStandaloneMode if err != nil { return nil, err } + + if settings.Namespace == models.MonitorNamespace { + // create oidc secret + secret, err := models.MonitorComponentManager.CreateOIDCSecret(cluster, "", "") + if err != nil { + return nil, err + } + if settings.Monitor.Grafana.OAuth == nil { + settings.Monitor.Grafana.OAuth = &api.ComponentSettingMonitorGrafanaOAuth{} + } + settings.Monitor.Grafana.OAuth.ClientId = secret.ClientId + settings.Monitor.Grafana.OAuth.ClientSecret = secret.SAccessKeySecretBlob.Secret + } + if err := drv.DoEnable(cluster, settings); err != nil { return nil, err } diff --git a/pkg/kubeserver/templates/components/monitor_promtheus.go b/pkg/kubeserver/templates/components/monitor_promtheus.go index c5102b456..6be784fe1 100644 --- a/pkg/kubeserver/templates/components/monitor_promtheus.go +++ b/pkg/kubeserver/templates/components/monitor_promtheus.go @@ -181,7 +181,8 @@ func NewPVCStorage(storage *api.ComponentStorage) (*Storage, error) { } type Service struct { - Type string `json:"type"` + Type string `json:"type"` + NodePort string `json:"nodePort"` } type GrafanaIngress 
struct { @@ -203,16 +204,19 @@ type GrafanaIniServer struct { } type GrafanaIniOAuth struct { - Enabled bool `json:"enabled"` - ClientId string `json:"client_id"` - ClientSecret string `json:"client_secret"` - Scopes string `json:"scopes"` - AuthURL string `json:"auth_url"` - TokenURL string `json:"token_url"` - APIURL string `json:"api_url"` - AllowedDomains string `json:"allowed_domains"` - AllowSignUp bool `json:"allow_sign_up"` - RoleAttributePath string `json:"role_attribute_path"` + Enabled bool `json:"enabled"` + ClientId string `json:"client_id"` + ClientSecret string `json:"client_secret"` + Scopes string `json:"scopes"` + AuthURL string `json:"auth_url"` + TokenURL string `json:"token_url"` + APIURL string `json:"api_url"` + AllowedDomains string `json:"allowed_domains"` + AllowSignUp bool `json:"allow_sign_up"` + RoleAttributePath string `json:"role_attribute_path"` + IdTokenAttributeName string `json:"id_token_attribute_name"` + TlsSkipVerifyInsecure bool `json:"tls_skip_verify_insecure"` + AllowAssignGrafanaAdmin bool `json:"allow_assign_grafana_admin"` } type GrafanaIniDatabase struct { From 66e6c34c6ca1c35e1e9e8829f4e25d3960f3e2f3 Mon Sep 17 00:00:00 2001 From: dylgo <40930623+dylgo@users.noreply.github.com> Date: Wed, 14 Dec 2022 16:13:46 +0800 Subject: [PATCH 4/9] Update values.yaml --- manifests/helm/monitor-stack/charts/grafana/values.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/manifests/helm/monitor-stack/charts/grafana/values.yaml b/manifests/helm/monitor-stack/charts/grafana/values.yaml index c2080d797..adddf00bf 100755 --- a/manifests/helm/monitor-stack/charts/grafana/values.yaml +++ b/manifests/helm/monitor-stack/charts/grafana/values.yaml @@ -505,4 +505,3 @@ sidecar: ## Override the deployment namespace ## namespaceOverride: "" ->>>>>>> d5a85d85 (fix: oidc auto create) From 1c1d07f5a19d1b6494c78cd84ee0b3f3eb6786b4 Mon Sep 17 00:00:00 2001 From: dylgo <40930623+dylgo@users.noreply.github.com> Date: Wed, 14 Dec 2022 16:34:55 +0800 Subject: [PATCH 5/9] fix:use yunion image --- pkg/kubeserver/models/components_monitor.go | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/pkg/kubeserver/models/components_monitor.go b/pkg/kubeserver/models/components_monitor.go index 4774e88ad..5b68a16fe 100644 --- a/pkg/kubeserver/models/components_monitor.go +++ b/pkg/kubeserver/models/components_monitor.go @@ -471,12 +471,6 @@ func (m SMonitorComponentManager) GetHelmValues(cluster *SCluster, setting *api. Tag: tag, } } - grafanaMi := func(name, tag string) components.Image { - return components.Image{ - Repository: fmt.Sprintf("%s/%s", "hb.grgbanking.com/open/grafana", name), - Tag: tag, - } - } serverDomain := options.Options.ApiServer grafanaEip, err := cluster.GetAPIServerPublicEndpoint() if err != nil { @@ -610,8 +604,7 @@ func (m SMonitorComponentManager) GetHelmValues(cluster *SCluster, setting *api. 
DefaultDatasourceEnabled: true, }, }, - Image: grafanaMi("grafana", "7.2.1-zh"), - //Image: mi("grafana", "6.7.1"), + Image: mi("grafana", "6.7.1"), Service: &components.Service{ Type: string(v1.ServiceTypeNodePort), NodePort: m.GetGrafanaPort(), From 987f155761240fad00cc9ddef2856498d5fc5310 Mon Sep 17 00:00:00 2001 From: dengyunlong <375331854@qq.com> Date: Thu, 15 Dec 2022 14:48:59 +0800 Subject: [PATCH 6/9] fix:helm init value --- .../charts/grafana/templates/_pod.tpl | 2 +- .../monitor-stack/charts/grafana/values.yaml | 33 ++++++++++--------- pkg/kubeserver/models/component_base.go | 4 --- pkg/kubeserver/models/components_monitor.go | 13 ++------ 4 files changed, 20 insertions(+), 32 deletions(-) diff --git a/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl b/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl index 9502de832..d27abe983 100755 --- a/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl +++ b/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl @@ -218,7 +218,7 @@ containers: containerPort: {{ .Values.service.port }} protocol: TCP - name: {{ .Values.podPortName }} - containerPort: 30000 + containerPort: 3000 protocol: TCP env: {{- if not .Values.env.GF_SECURITY_ADMIN_USER }} diff --git a/manifests/helm/monitor-stack/charts/grafana/values.yaml b/manifests/helm/monitor-stack/charts/grafana/values.yaml index c2080d797..39d0bbb1e 100755 --- a/manifests/helm/monitor-stack/charts/grafana/values.yaml +++ b/manifests/helm/monitor-stack/charts/grafana/values.yaml @@ -33,12 +33,12 @@ deploymentStrategy: readinessProbe: httpGet: path: /api/health - port: 30000 + port: 3000 livenessProbe: httpGet: path: /api/health - port: 30000 + port: 3000 initialDelaySeconds: 60 timeoutSeconds: 30 failureThreshold: 10 @@ -113,7 +113,9 @@ podPortName: grafana ## ref: http://kubernetes.io/docs/user-guide/services/ ## service: - port: 30000 + type: ClusterIP + port: 80 + targetPort: 3000 # targetPort: 4181 To be used with a proxy extraContainer annotations: {} labels: {} @@ -380,20 +382,20 @@ dashboardsConfigMaps: {} ## ref: http://docs.grafana.org/installation/configuration/ ## grafana.ini: -# paths: -# data: /var/lib/grafana/data -# logs: /var/log/grafana -# plugins: /var/lib/grafana/plugins -# provisioning: /etc/grafana/provisioning -# analytics: -# check_for_updates: false + paths: + data: /var/lib/grafana/data + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: false log: mode: console -# grafana_net: -# url: https://grafana.net -# auth.anonymous: -# enabled: true -# org_role: Viewer + grafana_net: + url: https://grafana.net + auth.anonymous: + enabled: true + org_role: Viewer ## grafana Authentication can be enabled with the following values on grafana.ini # server: # The full public facing url you use in browser, used for redirects and emails @@ -505,4 +507,3 @@ sidecar: ## Override the deployment namespace ## namespaceOverride: "" ->>>>>>> d5a85d85 (fix: oidc auto create) diff --git a/pkg/kubeserver/models/component_base.go b/pkg/kubeserver/models/component_base.go index ad99dec00..8118412fa 100644 --- a/pkg/kubeserver/models/component_base.go +++ b/pkg/kubeserver/models/component_base.go @@ -5,9 +5,6 @@ import ( "fmt" "strings" - "yunion.io/x/jsonutils" - "yunion.io/x/log" - "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/chart/loader" "helm.sh/helm/v3/pkg/release" @@ -149,7 +146,6 @@ func (m HelmComponentManager) CreateHelmResource( if err := 
m.EnsureNamespace(cluster, m.namespace); err != nil { return errors.Wrapf(err, "%s ensure namespace %q", m.releaseName, m.namespace) } - log.Errorf("**** helm install vals: %s", jsonutils.Marshal(vals).PrettyString()) if _, err := m.HelmInstall(cluster, m.namespace, m.embedChartName, m.releaseName, vals); err != nil { return errors.Wrapf(err, "create helm %s release", m.releaseName) } diff --git a/pkg/kubeserver/models/components_monitor.go b/pkg/kubeserver/models/components_monitor.go index 4774e88ad..c94ee0897 100644 --- a/pkg/kubeserver/models/components_monitor.go +++ b/pkg/kubeserver/models/components_monitor.go @@ -34,8 +34,7 @@ var ( ) const ( - //MonitorNamespace = "onecloud-monitoring" - MonitorNamespace = "kube-monitoring" + MonitorNamespace = "onecloud-monitoring" MonitorReleaseName = "monitor" ThanosObjectStoreConfigSecretName = "thanos-objstore-config" ThanosObjectStoreConfigSecretKey = "thanos.yaml" @@ -441,7 +440,6 @@ func (m SMonitorComponentManager) CreateOIDCSecret(cluster *SCluster, uid string func (m SMonitorComponentManager) GetGrafanaHost(cluster *SCluster) (grafanaHost string, err error) { grafanaEip, err := cluster.GetAPIServerPublicEndpoint() if err != nil { - fmt.Println("k8s cluster no eip", err) return "", err } grafanaHost = fmt.Sprintf("%s:%s", grafanaEip, m.GetGrafanaPort()) @@ -471,12 +469,6 @@ func (m SMonitorComponentManager) GetHelmValues(cluster *SCluster, setting *api. Tag: tag, } } - grafanaMi := func(name, tag string) components.Image { - return components.Image{ - Repository: fmt.Sprintf("%s/%s", "hb.grgbanking.com/open/grafana", name), - Tag: tag, - } - } serverDomain := options.Options.ApiServer grafanaEip, err := cluster.GetAPIServerPublicEndpoint() if err != nil { @@ -610,8 +602,7 @@ func (m SMonitorComponentManager) GetHelmValues(cluster *SCluster, setting *api. DefaultDatasourceEnabled: true, }, }, - Image: grafanaMi("grafana", "7.2.1-zh"), - //Image: mi("grafana", "6.7.1"), + Image: mi("grafana", "6.7.1"), Service: &components.Service{ Type: string(v1.ServiceTypeNodePort), NodePort: m.GetGrafanaPort(), From 359200fa67f779b4ec51dd2f3a90150cf4fa3f94 Mon Sep 17 00:00:00 2001 From: dengyunlong <375331854@qq.com> Date: Thu, 15 Dec 2022 14:52:28 +0800 Subject: [PATCH 7/9] fix:merge error --- .../charts/grafana/templates/_pod.tpl | 742 +-- .../monitor-stack/charts/grafana/values.yaml | 1018 ++--- manifests/helm/monitor-stack/values.yaml | 4006 ++++++++--------- pkg/kubeserver/models/component_base.go | 446 +- pkg/kubeserver/models/components_monitor.go | 1918 ++++---- 5 files changed, 4065 insertions(+), 4065 deletions(-) diff --git a/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl b/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl index d27abe983..9b4076f10 100755 --- a/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl +++ b/manifests/helm/monitor-stack/charts/grafana/templates/_pod.tpl @@ -1,371 +1,371 @@ -{{- define "grafana.pod" -}} -{{- if .Values.schedulerName }} -schedulerName: "{{ .Values.schedulerName }}" -{{- end }} -serviceAccountName: {{ template "grafana.serviceAccountName" . 
}} -{{- if .Values.schedulerName }} -schedulerName: "{{ .Values.schedulerName }}" -{{- end }} -{{- if .Values.securityContext }} -securityContext: -{{ toYaml .Values.securityContext | indent 2 }} -{{- end }} -{{- if .Values.priorityClassName }} -priorityClassName: {{ .Values.priorityClassName }} -{{- end }} -{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.extraInitContainers) }} -initContainers: -{{- end }} -{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} - - name: init-chown-data - image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" - imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} - securityContext: - runAsUser: 0 - command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsUser }}", "/var/lib/grafana"] - resources: -{{ toYaml .Values.initChownData.resources | indent 6 }} - volumeMounts: - - name: storage - mountPath: "/var/lib/grafana" -{{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} -{{- end }} -{{- end }} -{{- if .Values.dashboards }} - - name: download-dashboards - image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" - imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} - command: ["/bin/sh"] - args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh /etc/grafana/download_dashboards.sh" ] - resources: -{{ toYaml .Values.downloadDashboards.resources | indent 6 }} - env: -{{- range $key, $value := .Values.downloadDashboards.env }} - - name: "{{ $key }}" - value: "{{ $value }}" -{{- end }} - volumeMounts: - - name: config - mountPath: "/etc/grafana/download_dashboards.sh" - subPath: download_dashboards.sh - - name: storage - mountPath: "/var/lib/grafana" -{{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} -{{- end }} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - {{- end }} -{{- end }} -{{- if .Values.sidecar.datasources.enabled }} - - name: {{ template "grafana.name" . }}-sc-datasources - image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - - name: METHOD - value: LIST - - name: LABEL - value: "{{ .Values.sidecar.datasources.label }}" - - name: FOLDER - value: "/etc/grafana/provisioning/datasources" - - name: RESOURCE - value: "both" - {{- if .Values.sidecar.datasources.searchNamespace }} - - name: NAMESPACE - value: "{{ .Values.sidecar.datasources.searchNamespace }}" - {{- end }} - {{- if .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ .Values.sidecar.skipTlsVerify }}" - {{- end }} - resources: -{{ toYaml .Values.sidecar.resources | indent 6 }} - volumeMounts: - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end}} -{{- if .Values.extraInitContainers }} -{{ toYaml .Values.extraInitContainers | indent 2 }} -{{- end }} -{{- if .Values.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . }} -{{- end}} -{{- end }} -containers: -{{- if .Values.sidecar.dashboards.enabled }} - - name: {{ template "grafana.name" . 
}}-sc-dashboard - image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag}}" - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - - name: METHOD - value: {{ .Values.sidecar.dashboards.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.dashboards.label }}" - - name: FOLDER - value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" - - name: RESOURCE - value: "both" - {{- if .Values.sidecar.dashboards.searchNamespace }} - - name: NAMESPACE - value: "{{ .Values.sidecar.dashboards.searchNamespace }}" - {{- end }} - {{- if .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ .Values.sidecar.skipTlsVerify }}" - {{- end }} - resources: -{{ toYaml .Values.sidecar.resources | indent 6 }} - volumeMounts: - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} -{{- end}} - - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if .Values.command }} - command: - {{- range .Values.command }} - - {{ . }} - {{- end }} - {{- end}} - volumeMounts: - - name: config - mountPath: "/etc/grafana/grafana.ini" - subPath: grafana.ini - {{- if .Values.ldap.enabled }} - - name: ldap - mountPath: "/etc/grafana/ldap.toml" - subPath: ldap.toml - {{- end }} - {{- range .Values.extraConfigmapMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath | default "" }} - readOnly: {{ .readOnly }} - {{- end }} - - name: storage - mountPath: "/var/lib/grafana" -{{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} -{{- end }} -{{- if .Values.dashboards }} -{{- range $provider, $dashboards := .Values.dashboards }} -{{- range $key, $value := $dashboards }} -{{- if (or (hasKey $value "json") (hasKey $value "file")) }} - - name: dashboards-{{ $provider }} - mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" - subPath: "{{ $key }}.json" -{{- end }} -{{- end }} -{{- end }} -{{- end -}} -{{- if .Values.dashboardsConfigMaps }} -{{- range (keys .Values.dashboardsConfigMaps | sortAlpha) }} - - name: dashboards-{{ . }} - mountPath: "/var/lib/grafana/dashboards/{{ . 
}}" -{{- end }} -{{- end }} -{{- if .Values.datasources }} - - name: config - mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml" - subPath: datasources.yaml -{{- end }} -{{- if .Values.notifiers }} - - name: config - mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml" - subPath: notifiers.yaml -{{- end }} -{{- if .Values.dashboardProviders }} - - name: config - mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml" - subPath: dashboardproviders.yaml -{{- end }} -{{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} -{{ if .Values.sidecar.dashboards.SCProvider }} - - name: sc-dashboard-provider - mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" - subPath: provider.yaml -{{- end}} -{{- end}} -{{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end}} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.extraVolumeMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath | default "" }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - {{- end }} - ports: - - name: {{ .Values.service.portName }} - containerPort: {{ .Values.service.port }} - protocol: TCP - - name: {{ .Values.podPortName }} - containerPort: 3000 - protocol: TCP - env: - {{- if not .Values.env.GF_SECURITY_ADMIN_USER }} - - name: GF_SECURITY_ADMIN_USER - valueFrom: - secretKeyRef: - name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) }} - - name: GF_SECURITY_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if .Values.plugins }} - - name: GF_INSTALL_PLUGINS - valueFrom: - configMapKeyRef: - name: {{ template "grafana.fullname" . }} - key: plugins - {{- end }} - {{- if .Values.smtp.existingSecret }} - - name: GF_SMTP_USER - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.userKey | default "user" }} - - name: GF_SMTP_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.passwordKey | default "password" }} - {{- end }} - {{- range $key, $value := .Values.envValueFrom }} - - name: {{ $key | quote }} - valueFrom: -{{ toYaml $value | indent 10 }} - {{- end }} -{{- range $key, $value := .Values.env }} - - name: "{{ $key }}" - value: "{{ $value }}" -{{- end }} - {{- if .Values.envFromSecret }} - envFrom: - - secretRef: - name: {{ .Values.envFromSecret }} - {{- end }} - {{- if .Values.envRenderSecret }} - envFrom: - - secretRef: - name: {{ template "grafana.fullname" . }}-env - {{- end }} - livenessProbe: -{{ toYaml .Values.livenessProbe | indent 6 }} - readinessProbe: -{{ toYaml .Values.readinessProbe | indent 6 }} - resources: -{{ toYaml .Values.resources | indent 6 }} -{{- with .Values.extraContainers }} -{{ tpl . $ | indent 2 }} -{{- end }} -{{- with .Values.nodeSelector }} -nodeSelector: -{{ toYaml . 
| indent 2 }} -{{- end }} -{{- with .Values.affinity }} -affinity: -{{ toYaml . | indent 2 }} -{{- end }} -{{- with .Values.tolerations }} -tolerations: -{{ toYaml . | indent 2 }} -{{- end }} -volumes: - - name: config - configMap: - name: {{ template "grafana.fullname" . }} -{{- range .Values.extraConfigmapMounts }} - - name: {{ .name }} - configMap: - name: {{ .configMap }} -{{- end }} - {{- if .Values.dashboards }} - {{- range (keys .Values.dashboards | sortAlpha) }} - - name: dashboards-{{ . }} - configMap: - name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }} - {{- end }} - {{- end }} - {{- if .Values.dashboardsConfigMaps }} - {{ $root := . }} - {{- range $provider, $name := .Values.dashboardsConfigMaps }} - - name: dashboards-{{ $provider }} - configMap: - name: {{ tpl $name $root }} - {{- end }} - {{- end }} - {{- if .Values.ldap.enabled }} - - name: ldap - secret: - {{- if .Values.ldap.existingSecret }} - secretName: {{ .Values.ldap.existingSecret }} - {{- else }} - secretName: {{ template "grafana.fullname" . }} - {{- end }} - items: - - key: ldap-toml - path: ldap.toml - {{- end }} -{{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} - - name: storage - persistentVolumeClaim: - claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }} -{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }} -# nothing -{{- else }} - - name: storage - emptyDir: {} -{{- end -}} -{{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - emptyDir: {} -{{- if .Values.sidecar.dashboards.SCProvider }} - - name: sc-dashboard-provider - configMap: - name: {{ template "grafana.fullname" . }}-config-dashboards -{{- end }} -{{- end }} -{{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - emptyDir: {} -{{- end -}} -{{- range .Values.extraSecretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - defaultMode: {{ .defaultMode }} -{{- end }} -{{- range .Values.extraVolumeMounts }} - - name: {{ .name }} - persistentVolumeClaim: - claimName: {{ .existingClaim }} -{{- end }} -{{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - emptyDir: {} -{{- end -}} -{{- end }} +{{- define "grafana.pod" -}} +{{- if .Values.schedulerName }} +schedulerName: "{{ .Values.schedulerName }}" +{{- end }} +serviceAccountName: {{ template "grafana.serviceAccountName" . 
}} +{{- if .Values.schedulerName }} +schedulerName: "{{ .Values.schedulerName }}" +{{- end }} +{{- if .Values.securityContext }} +securityContext: +{{ toYaml .Values.securityContext | indent 2 }} +{{- end }} +{{- if .Values.priorityClassName }} +priorityClassName: {{ .Values.priorityClassName }} +{{- end }} +{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.extraInitContainers) }} +initContainers: +{{- end }} +{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} + - name: init-chown-data + image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" + imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} + securityContext: + runAsUser: 0 + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsUser }}", "/var/lib/grafana"] + resources: +{{ toYaml .Values.initChownData.resources | indent 6 }} + volumeMounts: + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- end }} +{{- if .Values.dashboards }} + - name: download-dashboards + image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" + imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} + command: ["/bin/sh"] + args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh /etc/grafana/download_dashboards.sh" ] + resources: +{{ toYaml .Values.downloadDashboards.resources | indent 6 }} + env: +{{- range $key, $value := .Values.downloadDashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" +{{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/download_dashboards.sh" + subPath: download_dashboards.sh + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: {{ template "grafana.name" . }}-sc-datasources + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: "both" + {{- if .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.datasources.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.extraInitContainers }} +{{ toYaml .Values.extraInitContainers | indent 2 }} +{{- end }} +{{- if .Values.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end}} +{{- end }} +containers: +{{- if .Values.sidecar.dashboards.enabled }} + - name: {{ template "grafana.name" . 
}}-sc-dashboard + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag}}" + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: {{ .Values.sidecar.dashboards.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.dashboards.label }}" + - name: FOLDER + value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" + - name: RESOURCE + value: "both" + {{- if .Values.sidecar.dashboards.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.dashboards.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{- end}} + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ . }} + {{- end }} + {{- end}} + volumeMounts: + - name: config + mountPath: "/etc/grafana/grafana.ini" + subPath: grafana.ini + {{- if .Values.ldap.enabled }} + - name: ldap + mountPath: "/etc/grafana/ldap.toml" + subPath: ldap.toml + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- if .Values.dashboards }} +{{- range $provider, $dashboards := .Values.dashboards }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} + - name: dashboards-{{ $provider }} + mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + subPath: "{{ $key }}.json" +{{- end }} +{{- end }} +{{- end }} +{{- end -}} +{{- if .Values.dashboardsConfigMaps }} +{{- range (keys .Values.dashboardsConfigMaps | sortAlpha) }} + - name: dashboards-{{ . }} + mountPath: "/var/lib/grafana/dashboards/{{ . 
}}" +{{- end }} +{{- end }} +{{- if .Values.datasources }} + - name: config + mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml" + subPath: datasources.yaml +{{- end }} +{{- if .Values.notifiers }} + - name: config + mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml" + subPath: notifiers.yaml +{{- end }} +{{- if .Values.dashboardProviders }} + - name: config + mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml" + subPath: dashboardproviders.yaml +{{- end }} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{ if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" + subPath: provider.yaml +{{- end}} +{{- end}} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + containerPort: {{ .Values.service.port }} + protocol: TCP + - name: {{ .Values.podPortName }} + containerPort: 3000 + protocol: TCP + env: + {{- if not .Values.env.GF_SECURITY_ADMIN_USER }} + - name: GF_SECURITY_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) }} + - name: GF_SECURITY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if .Values.plugins }} + - name: GF_INSTALL_PLUGINS + valueFrom: + configMapKeyRef: + name: {{ template "grafana.fullname" . }} + key: plugins + {{- end }} + {{- if .Values.smtp.existingSecret }} + - name: GF_SMTP_USER + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.userKey | default "user" }} + - name: GF_SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.passwordKey | default "password" }} + {{- end }} + {{- range $key, $value := .Values.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: +{{ toYaml $value | indent 10 }} + {{- end }} +{{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" +{{- end }} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ .Values.envFromSecret }} + {{- end }} + {{- if .Values.envRenderSecret }} + envFrom: + - secretRef: + name: {{ template "grafana.fullname" . }}-env + {{- end }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 6 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 6 }} + resources: +{{ toYaml .Values.resources | indent 6 }} +{{- with .Values.extraContainers }} +{{ tpl . $ | indent 2 }} +{{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: +{{ toYaml . 
| indent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: +{{ toYaml . | indent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: +{{ toYaml . | indent 2 }} +{{- end }} +volumes: + - name: config + configMap: + name: {{ template "grafana.fullname" . }} +{{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} +{{- end }} + {{- if .Values.dashboards }} + {{- range (keys .Values.dashboards | sortAlpha) }} + - name: dashboards-{{ . }} + configMap: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }} + {{- end }} + {{- end }} + {{- if .Values.dashboardsConfigMaps }} + {{ $root := . }} + {{- range $provider, $name := .Values.dashboardsConfigMaps }} + - name: dashboards-{{ $provider }} + configMap: + name: {{ tpl $name $root }} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: ldap + secret: + {{- if .Values.ldap.existingSecret }} + secretName: {{ .Values.ldap.existingSecret }} + {{- else }} + secretName: {{ template "grafana.fullname" . }} + {{- end }} + items: + - key: ldap-toml + path: ldap.toml + {{- end }} +{{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} + - name: storage + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }} +{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }} +# nothing +{{- else }} + - name: storage + emptyDir: {} +{{- end -}} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + emptyDir: {} +{{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + configMap: + name: {{ template "grafana.fullname" . }}-config-dashboards +{{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + emptyDir: {} +{{- end -}} +{{- range .Values.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} +{{- end }} +{{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + persistentVolumeClaim: + claimName: {{ .existingClaim }} +{{- end }} +{{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + emptyDir: {} +{{- end -}} +{{- end }} diff --git a/manifests/helm/monitor-stack/charts/grafana/values.yaml b/manifests/helm/monitor-stack/charts/grafana/values.yaml index 39d0bbb1e..ebf1c923f 100755 --- a/manifests/helm/monitor-stack/charts/grafana/values.yaml +++ b/manifests/helm/monitor-stack/charts/grafana/values.yaml @@ -1,509 +1,509 @@ -rbac: - create: true - pspEnabled: true - pspUseAppArmor: true - namespaced: false - extraRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] - extraClusterRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] -serviceAccount: - create: true - name: - nameTest: -# annotations: - -replicas: 1 - -## See `kubectl explain poddisruptionbudget.spec` for more -## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ -podDisruptionBudget: {} -# minAvailable: 1 -# maxUnavailable: 1 - -## See `kubectl explain deployment.spec.strategy` for more -## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy -deploymentStrategy: - type: RollingUpdate - -readinessProbe: - httpGet: - path: /api/health - port: 3000 - -livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 60 - timeoutSeconds: 30 - failureThreshold: 10 - -## Use an alternate scheduler, e.g. "stork". 
-## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: "default-scheduler" - -image: - repository: grafana/grafana - tag: 6.7.1 - pullPolicy: IfNotPresent - - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistrKeySecretName - -testFramework: - enabled: true - image: "bats/bats" - tag: "v1.1.0" - imagePullPolicy: IfNotPresent - securityContext: {} - -securityContext: - runAsUser: 472 - fsGroup: 472 - - -extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /etc/grafana/ssl/ - # subPath: certificates.crt # (optional) - # configMap: certs-configmap - # readOnly: true - - -extraEmptyDirMounts: [] - # - name: provisioning-notifiers - # mountPath: /etc/grafana/provisioning/notifiers - - -## Assign a PriorityClassName to pods if set -# priorityClassName: - -downloadDashboardsImage: - repository: curlimages/curl - tag: 7.68.0 - pullPolicy: IfNotPresent - -downloadDashboards: - env: {} - resources: {} - -## Pod Annotations -# podAnnotations: {} - -## Pod Labels -# podLabels: {} - -podPortName: grafana - -## Deployment annotations -# annotations: {} - -## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). -## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. -## ref: http://kubernetes.io/docs/user-guide/services/ -## -service: - type: ClusterIP - port: 80 - targetPort: 3000 - # targetPort: 4181 To be used with a proxy extraContainer - annotations: {} - labels: {} - portName: service - -extraExposePorts: [] - # - name: keycloak - # port: 8080 - # targetPort: 8080 - # type: ClusterIP - -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - labels: {} - path: / - host: chart-example.local - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. - extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi - -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -# -nodeSelector: {} - -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] - -## Affinity for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: {} - -extraInitContainers: [] - -## Enable an Specify container in extraContainers. 
This is meant to allow adding an authentication proxy to a grafana pod -extraContainers: | -# - name: proxy -# image: quay.io/gambol99/keycloak-proxy:latest -# args: -# - -provider=github -# - -client-id= -# - -client-secret= -# - -github-org= -# - -email-domain=* -# - -cookie-secret= -# - -http-address=http://0.0.0.0:4181 -# - -upstream-url=http://127.0.0.1:3000 -# ports: -# - name: proxy-web -# containerPort: 4181 - -## Enable persistence using Persistent Volume Claims -## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ -## -persistence: - type: pvc - enabled: false - # storageClassName: default - accessModes: - - ReadWriteOnce - size: 10Gi - # annotations: {} - finalizers: - - kubernetes.io/pvc-protection - # subPath: "" - # existingClaim: - -initChownData: - ## If false, data ownership will not be reset at startup - ## This allows the prometheus-server to be run with an arbitrary user - ## - enabled: true - - ## initChownData container image - ## - image: - repository: busybox - tag: "1.31.1" - pullPolicy: IfNotPresent - - ## initChownData resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - - -# Administrator credentials when not using an existing secret (see below) -adminUser: admin -# adminPassword: strongpassword - -# Use an existing secret for the admin user. -admin: - existingSecret: "" - userKey: admin-user - passwordKey: admin-password - -## Define command to be executed at startup by grafana container -## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) -## Default is "run.sh" as defined in grafana's Dockerfile -# command: -# - "sh" -# - "/run.sh" - -## Use an alternate scheduler, e.g. "stork". -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: - -## Extra environment variables that will be pass onto deployment pods -env: {} - -## "valueFrom" environment variable references that will be added to deployment pods -## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core -## Renders in container spec as: -## env: -## ... -## - name: -## valueFrom: -## -envValueFrom: {} - -## The name of a secret in the same kubernetes namespace which contain values to be added to the environment -## This can be useful for auth tokens, etc -envFromSecret: "" - -## Sensible environment variables that will be rendered as new secret object -## This can be useful for auth tokens, etc -envRenderSecret: {} - -## Additional grafana server secret mounts -# Defines additional mounts with secrets. Secrets must be manually created in the namespace. -extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # secretName: grafana-secret-files - # readOnly: true - -## Additional grafana server volume mounts -# Defines additional volume mounts. -extraVolumeMounts: [] - # - name: extra-volume - # mountPath: /mnt/volume - # readOnly: true - # existingClaim: volume-claim - -## Pass the plugins you want installed as a list. 
-## -plugins: [] - # - digrich-bubblechart-panel - # - grafana-clock-panel - -## Configure grafana datasources -## ref: http://docs.grafana.org/administration/provisioning/#datasources -## -datasources: {} -# datasources.yaml: -# apiVersion: 1 -# datasources: -# - name: Prometheus -# type: prometheus -# url: http://prometheus-prometheus-server -# access: proxy -# isDefault: true - -## Configure notifiers -## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels -## -notifiers: {} -# notifiers.yaml: -# notifiers: -# - name: email-notifier -# type: email -# uid: email1 -# # either: -# org_id: 1 -# # or -# org_name: Main Org. -# is_default: true -# settings: -# addresses: an_email_address@example.com -# delete_notifiers: - -## Configure grafana dashboard providers -## ref: http://docs.grafana.org/administration/provisioning/#dashboards -## -## `path` must be /var/lib/grafana/dashboards/ -## -dashboardProviders: {} -# dashboardproviders.yaml: -# apiVersion: 1 -# providers: -# - name: 'default' -# orgId: 1 -# folder: '' -# type: file -# disableDeletion: false -# editable: true -# options: -# path: /var/lib/grafana/dashboards/default - -## Configure grafana dashboard to import -## NOTE: To use dashboards you must also enable/configure dashboardProviders -## ref: https://grafana.com/dashboards -## -## dashboards per provider, use provider name as key. -## -dashboards: {} - # default: - # some-dashboard: - # json: | - # $RAW_JSON - # custom-dashboard: - # file: dashboards/custom-dashboard.json - # prometheus-stats: - # gnetId: 2 - # revision: 2 - # datasource: Prometheus - # local-dashboard: - # url: https://example.com/repository/test.json - # local-dashboard-base64: - # url: https://example.com/repository/test-b64.json - # b64content: true - -## Reference to external ConfigMap per provider. Use provider name as key and ConfiMap name as value. -## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. 
-## ConfigMap data example: -## -## data: -## example-dashboard.json: | -## RAW_JSON -## -dashboardsConfigMaps: {} -# default: "" - -## Grafana's primary configuration -## NOTE: values in map will be converted to ini format -## ref: http://docs.grafana.org/installation/configuration/ -## -grafana.ini: - paths: - data: /var/lib/grafana/data - logs: /var/log/grafana - plugins: /var/lib/grafana/plugins - provisioning: /etc/grafana/provisioning - analytics: - check_for_updates: false - log: - mode: console - grafana_net: - url: https://grafana.net - auth.anonymous: - enabled: true - org_role: Viewer -## grafana Authentication can be enabled with the following values on grafana.ini - # server: - # The full public facing url you use in browser, used for redirects and emails - # root_url: - # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana - # auth.github: - # enabled: false - # allow_sign_up: false - # scopes: user:email,read:org - # auth_url: https://github.com/login/oauth/authorize - # token_url: https://github.com/login/oauth/access_token - # api_url: https://github.com/user - # team_ids: - # allowed_organizations: - # client_id: - # client_secret: -## LDAP Authentication can be enabled with the following values on grafana.ini -## NOTE: Grafana will fail to start if the value for ldap.toml is invalid - # auth.ldap: - # enabled: true - # allow_sign_up: true - # config_file: /etc/grafana/ldap.toml - -## Grafana's LDAP configuration -## Templated by the template in _helpers.tpl -## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled -## ref: http://docs.grafana.org/installation/configuration/#auth-ldap -## ref: http://docs.grafana.org/installation/ldap/#configuration -ldap: - enabled: false - # `existingSecret` is a reference to an existing secret containing the ldap configuration - # for Grafana in a key `ldap-toml`. - existingSecret: "" - # `config` is the content of `ldap.toml` that will be stored in the created secret - config: "" - # config: |- - # verbose_logging = true - - # [[servers]] - # host = "my-ldap-server" - # port = 636 - # use_ssl = true - # start_tls = false - # ssl_skip_verify = false - # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" - -## Grafana's SMTP configuration -## NOTE: To enable, grafana.ini must be configured with smtp.enabled -## ref: http://docs.grafana.org/installation/configuration/#smtp -smtp: - # `existingSecret` is a reference to an existing secret containing the smtp configuration - # for Grafana. 
- existingSecret: "" - userKey: "user" - passwordKey: "password" - -## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders -## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards -sidecar: - image: - repository: kiwigrid/k8s-sidecar - tag: 0.1.99 - imagePullPolicy: IfNotPresent - resources: {} -# limits: -# cpu: 100m -# memory: 100Mi -# requests: -# cpu: 50m -# memory: 50Mi - # skipTlsVerify Set to true to skip tls verification for kube api calls - # skipTlsVerify: true - dashboards: - enabled: false - SCProvider: true - # label that the configmaps with dashboards are marked with - label: grafana_dashboard - # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) - folder: /tmp/dashboards - # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead - defaultFolderName: null - # If specified, the sidecar will search for dashboard config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # provider configuration that lets grafana manage the dashboards - provider: - # name of the provider, should be unique - name: sidecarProvider - # orgid as configured in grafana - orgid: 1 - # folder in which the dashboards should be imported in grafana - folder: '' - # type of the provider - type: file - # disableDelete to activate a import-only behaviour - disableDelete: false - # allow updating provisioned dashboards from the UI - allowUiUpdates: false - datasources: - enabled: false - # label that the configmaps with datasources are marked with - label: grafana_datasource - # If specified, the sidecar will search for datasource config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - -## Override the deployment namespace -## -namespaceOverride: "" +rbac: + create: true + pspEnabled: true + pspUseAppArmor: true + namespaced: false + extraRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] +serviceAccount: + create: true + name: + nameTest: +# annotations: + +replicas: 1 + +## See `kubectl explain poddisruptionbudget.spec` for more +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} +# minAvailable: 1 +# maxUnavailable: 1 + +## See `kubectl explain deployment.spec.strategy` for more +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +deploymentStrategy: + type: RollingUpdate + +readinessProbe: + httpGet: + path: /api/health + port: 3000 + +livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + failureThreshold: 10 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: "default-scheduler" + +image: + repository: grafana/grafana + tag: 6.7.1 + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +testFramework: + enabled: true + image: "bats/bats" + tag: "v1.1.0" + imagePullPolicy: IfNotPresent + securityContext: {} + +securityContext: + runAsUser: 472 + fsGroup: 472 + + +extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # subPath: certificates.crt # (optional) + # configMap: certs-configmap + # readOnly: true + + +extraEmptyDirMounts: [] + # - name: provisioning-notifiers + # mountPath: /etc/grafana/provisioning/notifiers + + +## Assign a PriorityClassName to pods if set +# priorityClassName: + +downloadDashboardsImage: + repository: curlimages/curl + tag: 7.68.0 + pullPolicy: IfNotPresent + +downloadDashboards: + env: {} + resources: {} + +## Pod Annotations +# podAnnotations: {} + +## Pod Labels +# podLabels: {} + +podPortName: grafana + +## Deployment annotations +# annotations: {} + +## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. +## ref: http://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 80 + targetPort: 3000 + # targetPort: 4181 To be used with a proxy extraContainer + annotations: {} + labels: {} + portName: service + +extraExposePorts: [] + # - name: keycloak + # port: 8080 + # targetPort: 8080 + # type: ClusterIP + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: / + host: chart-example.local + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +# +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +extraInitContainers: [] + +## Enable an Specify container in extraContainers. 
This is meant to allow adding an authentication proxy to a grafana pod +extraContainers: | +# - name: proxy +# image: quay.io/gambol99/keycloak-proxy:latest +# args: +# - -provider=github +# - -client-id= +# - -client-secret= +# - -github-org= +# - -email-domain=* +# - -cookie-secret= +# - -http-address=http://0.0.0.0:4181 +# - -upstream-url=http://127.0.0.1:3000 +# ports: +# - name: proxy-web +# containerPort: 4181 + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + type: pvc + enabled: false + # storageClassName: default + accessModes: + - ReadWriteOnce + size: 10Gi + # annotations: {} + finalizers: + - kubernetes.io/pvc-protection + # subPath: "" + # existingClaim: + +initChownData: + ## If false, data ownership will not be reset at startup + ## This allows the prometheus-server to be run with an arbitrary user + ## + enabled: true + + ## initChownData container image + ## + image: + repository: busybox + tag: "1.31.1" + pullPolicy: IfNotPresent + + ## initChownData resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + +# Administrator credentials when not using an existing secret (see below) +adminUser: admin +# adminPassword: strongpassword + +# Use an existing secret for the admin user. +admin: + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + +## Define command to be executed at startup by grafana container +## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) +## Default is "run.sh" as defined in grafana's Dockerfile +# command: +# - "sh" +# - "/run.sh" + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Extra environment variables that will be pass onto deployment pods +env: {} + +## "valueFrom" environment variable references that will be added to deployment pods +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core +## Renders in container spec as: +## env: +## ... +## - name: +## valueFrom: +## +envValueFrom: {} + +## The name of a secret in the same kubernetes namespace which contain values to be added to the environment +## This can be useful for auth tokens, etc +envFromSecret: "" + +## Sensible environment variables that will be rendered as new secret object +## This can be useful for auth tokens, etc +envRenderSecret: {} + +## Additional grafana server secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # secretName: grafana-secret-files + # readOnly: true + +## Additional grafana server volume mounts +# Defines additional volume mounts. +extraVolumeMounts: [] + # - name: extra-volume + # mountPath: /mnt/volume + # readOnly: true + # existingClaim: volume-claim + +## Pass the plugins you want installed as a list. 
+## +plugins: [] + # - digrich-bubblechart-panel + # - grafana-clock-panel + +## Configure grafana datasources +## ref: http://docs.grafana.org/administration/provisioning/#datasources +## +datasources: {} +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus +# type: prometheus +# url: http://prometheus-prometheus-server +# access: proxy +# isDefault: true + +## Configure notifiers +## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels +## +notifiers: {} +# notifiers.yaml: +# notifiers: +# - name: email-notifier +# type: email +# uid: email1 +# # either: +# org_id: 1 +# # or +# org_name: Main Org. +# is_default: true +# settings: +# addresses: an_email_address@example.com +# delete_notifiers: + +## Configure grafana dashboard providers +## ref: http://docs.grafana.org/administration/provisioning/#dashboards +## +## `path` must be /var/lib/grafana/dashboards/ +## +dashboardProviders: {} +# dashboardproviders.yaml: +# apiVersion: 1 +# providers: +# - name: 'default' +# orgId: 1 +# folder: '' +# type: file +# disableDeletion: false +# editable: true +# options: +# path: /var/lib/grafana/dashboards/default + +## Configure grafana dashboard to import +## NOTE: To use dashboards you must also enable/configure dashboardProviders +## ref: https://grafana.com/dashboards +## +## dashboards per provider, use provider name as key. +## +dashboards: {} + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # b64content: true + +## Reference to external ConfigMap per provider. Use provider name as key and ConfiMap name as value. +## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. 
+## ConfigMap data example: +## +## data: +## example-dashboard.json: | +## RAW_JSON +## +dashboardsConfigMaps: {} +# default: "" + +## Grafana's primary configuration +## NOTE: values in map will be converted to ini format +## ref: http://docs.grafana.org/installation/configuration/ +## +grafana.ini: + paths: + data: /var/lib/grafana/data + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: false + log: + mode: console + grafana_net: + url: https://grafana.net + auth.anonymous: + enabled: true + org_role: Viewer +## grafana Authentication can be enabled with the following values on grafana.ini + # server: + # The full public facing url you use in browser, used for redirects and emails + # root_url: + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: +## LDAP Authentication can be enabled with the following values on grafana.ini +## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + +## Grafana's LDAP configuration +## Templated by the template in _helpers.tpl +## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled +## ref: http://docs.grafana.org/installation/configuration/#auth-ldap +## ref: http://docs.grafana.org/installation/ldap/#configuration +ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. + existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + +## Grafana's SMTP configuration +## NOTE: To enable, grafana.ini must be configured with smtp.enabled +## ref: http://docs.grafana.org/installation/configuration/#smtp +smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. 
+ existingSecret: "" + userKey: "user" + passwordKey: "password" + +## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders +## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards +sidecar: + image: + repository: kiwigrid/k8s-sidecar + tag: 0.1.99 + imagePullPolicy: IfNotPresent + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + dashboards: + enabled: false + SCProvider: true + # label that the configmaps with dashboards are marked with + label: grafana_dashboard + # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) + folder: /tmp/dashboards + # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead + defaultFolderName: null + # If specified, the sidecar will search for dashboard config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # provider configuration that lets grafana manage the dashboards + provider: + # name of the provider, should be unique + name: sidecarProvider + # orgid as configured in grafana + orgid: 1 + # folder in which the dashboards should be imported in grafana + folder: '' + # type of the provider + type: file + # disableDelete to activate a import-only behaviour + disableDelete: false + # allow updating provisioned dashboards from the UI + allowUiUpdates: false + datasources: + enabled: false + # label that the configmaps with datasources are marked with + label: grafana_datasource + # If specified, the sidecar will search for datasource config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + +## Override the deployment namespace +## +namespaceOverride: "" diff --git a/manifests/helm/monitor-stack/values.yaml b/manifests/helm/monitor-stack/values.yaml index 7939556b0..9e97584d3 100644 --- a/manifests/helm/monitor-stack/values.yaml +++ b/manifests/helm/monitor-stack/values.yaml @@ -1,2004 +1,2004 @@ -# Default values for prometheus-operator. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
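A minimal sketch of how the defaults below are consumed, assuming a hypothetical
override file and release name (none of the names here are taken from the patch
itself): the chart merges user-supplied values over these defaults, so an install
only needs to declare the keys it changes.

# my-overrides.yaml (illustrative)
alertmanager:
  enabled: false            # skip the bundled Alertmanager
grafana:
  adminPassword: change-me  # replace the chart's default admin password

# applied at install or upgrade time, for example:
#   helm upgrade --install my-monitor ./monitor-stack -n monitoring -f my-overrides.yaml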
- -## Provide a name in place of prometheus-operator for `app:` labels -## -nameOverride: "" - -## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6 -## -kubeTargetVersionOverride: "" - -## Provide a name to substitute for the full names of resources -## -fullnameOverride: "" - -## Labels to apply to all resources -## -commonLabels: {} -# scmhash: abc123 -# myLabel: aakkmd - -## Create default rules for monitoring the cluster -## -defaultRules: - create: true - rules: - alertmanager: true - etcd: true - general: true - k8s: true - kubeApiserver: true - kubeApiserverError: true - kubePrometheusNodeAlerting: true - kubePrometheusNodeRecording: true - kubernetesAbsent: true - kubernetesApps: true - kubernetesResources: true - kubernetesStorage: true - kubernetesSystem: true - kubeScheduler: true - network: true - node: true - prometheus: true - prometheusOperator: true - time: true - - ## Runbook url prefix for default rules - runbookUrl: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md# - ## Reduce app namespace alert scope - appNamespacesTarget: ".*" - - ## Labels for default rules - labels: {} - ## Annotations for default rules - annotations: {} - -## Provide custom recording or alerting rules to be deployed into the cluster. -## -additionalPrometheusRules: [] -# - name: my-rule-file -# groups: -# - name: my_group -# rules: -# - record: my_record -# expr: 100 * my_record - -## -global: - rbac: - create: true - pspEnabled: true - pspAnnotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## Reference to one or more secrets to be used when pulling images - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - imagePullSecrets: [] - # - name: "image-pull-secret" - -## Configuration for alertmanager -## ref: https://prometheus.io/docs/alerting/alertmanager/ -## -alertmanager: - - ## Deploy alertmanager - ## - enabled: true - - ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2 - ## - apiVersion: v2 - - ## Service account for Alertmanager to use. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - create: true - name: "" - - ## Configure pod disruption budgets for Alertmanager - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget - ## This configuration is immutable once created and will require the PDB to be deleted to be changed - ## https://github.com/kubernetes/kubernetes/issues/45398 - ## - podDisruptionBudget: - enabled: false - minAvailable: 1 - maxUnavailable: "" - - ## Alertmanager configuration directives - ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file - ## https://prometheus.io/webtools/alerting/routing-tree-editor/ - ## - config: - global: - resolve_timeout: 5m - route: - group_by: ['job'] - group_wait: 30s - group_interval: 5m - repeat_interval: 12h - receiver: 'null' - routes: - - match: - alertname: Watchdog - receiver: 'null' - receivers: - - name: 'null' - - ## Pass the Alertmanager configuration directives through Helm's templating - ## engine. If the Alertmanager configuration contains Alertmanager templates, - ## they'll need to be properly escaped so that they are not interpreted by - ## Helm - ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function - ## https://prometheus.io/docs/alerting/configuration/#%3Ctmpl_string%3E - ## https://prometheus.io/docs/alerting/notifications/ - ## https://prometheus.io/docs/alerting/notification_examples/ - tplConfig: false - - ## Alertmanager template files to format alerts - ## ref: https://prometheus.io/docs/alerting/notifications/ - ## https://prometheus.io/docs/alerting/notification_examples/ - ## - templateFiles: {} - # - ## An example template: - # template_1.tmpl: |- - # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }} - # - # {{ define "slack.myorg.text" }} - # {{- $root := . -}} - # {{ range .Alerts }} - # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}` - # *Cluster:* {{ template "cluster" $root }} - # *Description:* {{ .Annotations.description }} - # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:> - # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:> - # *Details:* - # {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}` - # {{ end }} - - ingress: - enabled: false - - annotations: {} - - labels: {} - - ## Hosts must be provided if Ingress is enabled. 
- ## - hosts: [] - # - alertmanager.domain.com - - ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix - ## - paths: [] - # - / - - ## TLS configuration for Alertmanager Ingress - ## Secret must be manually created in the namespace - ## - tls: [] - # - secretName: alertmanager-general-tls - # hosts: - # - alertmanager.example.com - - ## Configuration for Alertmanager secret - ## - secret: - annotations: {} - - ## Configuration for creating an Ingress that will map to each Alertmanager replica service - ## alertmanager.servicePerReplica must be enabled - ## - ingressPerReplica: - enabled: false - annotations: {} - labels: {} - - ## Final form of the hostname for each per replica ingress is - ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} - ## - ## Prefix for the per replica ingress that will have `-$replicaNumber` - ## appended to the end - hostPrefix: "" - ## Domain that will be used for the per replica ingress - hostDomain: "" - - ## Paths to use for ingress rules - ## - paths: [] - # - / - - ## Secret name containing the TLS certificate for alertmanager per replica ingress - ## Secret must be manually created in the namespace - tlsSecretName: "" - - ## Separated secret for each per replica Ingress. Can be used together with cert-manager - ## - tlsSecretPerReplica: - enabled: false - ## Final form of the secret for each per replica ingress is - ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} - ## - prefix: "alertmanager" - - ## Configuration for Alertmanager service - ## - service: - annotations: {} - labels: {} - clusterIP: "" - - ## Port for Alertmanager Service to listen on - ## - port: 9093 - ## To be used with a proxy extraContainer port - ## - targetPort: 9093 - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 30903 - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - loadBalancerIP: "" - loadBalancerSourceRanges: [] - ## Service type - ## - type: ClusterIP - - ## Configuration for creating a separate Service for each statefulset Alertmanager replica - ## - servicePerReplica: - enabled: false - annotations: {} - - ## Port for Alertmanager Service per replica to listen on - ## - port: 9093 - - ## To be used with a proxy extraContainer port - targetPort: 9093 - - ## Port to expose on each node - ## Only used if servicePerReplica.type is 'NodePort' - ## - nodePort: 30904 - - ## Loadbalancer source IP ranges - ## Only used if servicePerReplica.type is "loadbalancer" - loadBalancerSourceRanges: [] - ## Service type - ## - type: ClusterIP - - ## If true, create a serviceMonitor for alertmanager - ## - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - selfMonitor: true - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. 
- ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Settings affecting alertmanagerSpec - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec - ## - alertmanagerSpec: - ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata - ## Metadata Labels and Annotations gets propagated to the Alertmanager pods. - ## - podMetadata: {} - - ## Image of Alertmanager - ## - image: - repository: quay.io/prometheus/alertmanager - tag: v0.20.0 - - ## If true then the user will be responsible to provide a secret with alertmanager configuration - ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used - ## - useExistingSecret: false - - ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the - ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/. - ## - secrets: [] - - ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. - ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/. - ## - configMaps: [] - - ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for - ## this Alertmanager instance. Defaults to 'alertmanager-' The secret is mounted into /etc/alertmanager/config. - ## - # configSecret: - - ## Define Log Format - # Use logfmt (default) or json-formatted logging - logFormat: logfmt - - ## Log level for Alertmanager to be configured with. - ## - logLevel: info - - ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the - ## running cluster equal to the expected size. - replicas: 1 - - ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression - ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours). - ## - retention: 120h - - ## Storage is the definition of how storage will be used by the Alertmanager instances. - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md - ## - storage: {} - # volumeClaimTemplate: - # spec: - # storageClassName: gluster - # accessModes: ["ReadWriteOnce"] - # resources: - # requests: - # storage: 50Gi - # selector: {} - - - ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. string false - ## - externalUrl: - - ## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, - ## but the server serves requests under a different route prefix. For example for use with kubectl proxy. - ## - routePrefix: / - - ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions. - ## - paused: false - - ## Define which Nodes the Pods are scheduled on. - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Define resources requests and limits for single Pods. 
- ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # requests: - # memory: 400Mi - - ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. - ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. - ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. - ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. - ## - podAntiAffinity: "" - - ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity. - ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone - ## - podAntiAffinityTopologyKey: kubernetes.io/hostname - - ## Assign custom affinity rules to the alertmanager instance - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - affinity: {} - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/e2e-az-name - # operator: In - # values: - # - e2e-az1 - # - e2e-az2 - - ## If specified, the pod's tolerations. - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal" - # value: "value" - # effect: "NoSchedule" - - ## SecurityContext holds pod-level security attributes and common container settings. - ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - securityContext: - runAsNonRoot: true - runAsUser: 1000 - fsGroup: 2000 - - ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. - ## Note this is only for the Alertmanager UI, not the gossip communication. - ## - listenLocal: false - - ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. - ## - containers: [] - - ## Priority class assigned to the Pods - ## - priorityClassName: "" - - ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. - ## - additionalPeers: [] - - ## PortName to use for Alert Manager. - ## - portName: "web" - - -## Using default values from https://github.com/helm/charts/blob/master/stable/grafana/values.yaml -## -grafana: - enabled: true - - ## Deploy default dashboards. - ## - defaultDashboardsEnabled: true - - adminPassword: prom-operator - - ingress: - ## If true, Grafana Ingress will be created - ## - enabled: false - - ## Annotations for Grafana Ingress - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - - ## Labels to be added to the Ingress - ## - labels: {} - - ## Hostnames. - ## Must be provided if Ingress is enable. 
- ## - # hosts: - # - grafana.domain.com - hosts: [] - - ## Path for grafana ingress - path: / - - ## TLS configuration for grafana Ingress - ## Secret must be manually created in the namespace - ## - tls: [] - # - secretName: grafana-general-tls - # hosts: - # - grafana.example.com - - sidecar: - dashboards: - enabled: true - label: grafana_dashboard - datasources: - enabled: true - defaultDatasourceEnabled: true - - ## Annotations for Grafana datasource configmaps - ## - annotations: {} - - ## Create datasource for each Pod of Prometheus StatefulSet; - ## this uses headless service `prometheus-operated` which is - ## created by Prometheus Operator - ## ref: https://git.io/fjaBS - createPrometheusReplicasDatasources: false - label: grafana_datasource - - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /etc/grafana/ssl/ - # configMap: certs-configmap - # readOnly: true - - ## Configure additional grafana datasources - ## ref: http://docs.grafana.org/administration/provisioning/#datasources - additionalDataSources: [] - # - name: prometheus-sample - # access: proxy - # basicAuth: true - # basicAuthPassword: pass - # basicAuthUser: daco - # editable: false - # jsonData: - # tlsSkipVerify: true - # orgId: 1 - # type: prometheus - # url: https://prometheus.svc:9090 - # version: 1 - - ## Passed to grafana subchart and used by servicemonitor below - ## - service: - portName: service - - ## If true, create a serviceMonitor for grafana - ## - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - selfMonitor: true - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping the kube api server -## -kubeApiServer: - enabled: true - tlsConfig: - serverName: kubernetes - insecureSkipVerify: false - - ## If your API endpoint address is not reachable (as in AKS) you can replace it with the kubernetes service - ## - relabelings: [] - # - sourceLabels: - # - __meta_kubernetes_namespace - # - __meta_kubernetes_service_name - # - __meta_kubernetes_endpoint_port_name - # action: keep - # regex: default;kubernetes;https - # - targetLabel: __address__ - # replacement: kubernetes.default.svc:443 - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - jobLabel: component - selector: - matchLabels: - component: apiserver - provider: kubernetes - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - -## Component scraping the kubelet and kubelet-hosted cAdvisor -## -kubelet: - enabled: true - namespace: kube-system - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## Enable scraping the kubelet over https. 
For requirements to enable this see - ## https://github.com/coreos/prometheus-operator/issues/926 - ## - https: true - - ## Enable scraping /metrics/cadvisor from kubelet's service - ## - cAdvisor: true - - ## Metric relabellings to apply to samples before ingestion - ## - cAdvisorMetricRelabelings: [] - # - sourceLabels: [__name__, image] - # separator: ; - # regex: container_([a-z_]+); - # replacement: $1 - # action: drop - # - sourceLabels: [__name__] - # separator: ; - # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) - # replacement: $1 - # action: drop - - # relabel configs to apply to samples before ingestion. - # metrics_path is required to match upstream rules and charts - ## - cAdvisorRelabelings: - - sourceLabels: [__metrics_path__] - targetLabel: metrics_path - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - metricRelabelings: [] - # - sourceLabels: [__name__, image] - # separator: ; - # regex: container_([a-z_]+); - # replacement: $1 - # action: drop - # - sourceLabels: [__name__] - # separator: ; - # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) - # replacement: $1 - # action: drop - - # relabel configs to apply to samples before ingestion. - # metrics_path is required to match upstream rules and charts - ## - relabelings: - - sourceLabels: [__metrics_path__] - targetLabel: metrics_path - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping the kube controller manager -## -kubeControllerManager: - enabled: true - - ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## If using kubeControllerManager.endpoints only the port and targetPort are used - ## - service: - port: 10252 - targetPort: 10252 - # selector: - # component: kube-controller-manager - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## Enable scraping kube-controller-manager over https. - ## Requires proper certs (not self-signed) and delegated authentication/authorization checks - ## - https: false - - # Skip TLS certificate validation when scraping - insecureSkipVerify: null - - # Name of the server to use when validating TLS certificate - serverName: null - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping coreDns. Use either this or kubeDns -## -coreDns: - enabled: true - service: - port: 9153 - targetPort: 9153 - # selector: - # k8s-app: kube-dns - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## metric relabel configs to apply to samples before ingestion. 
- ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping kubeDns. Use either this or coreDns -## -kubeDns: - enabled: false - service: - dnsmasq: - port: 10054 - targetPort: 10054 - skydns: - port: 10055 - targetPort: 10055 - # selector: - # k8s-app: kube-dns - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - dnsmasqMetricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - dnsmasqRelabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping etcd -## -kubeEtcd: - enabled: true - - ## If your etcd is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used - ## - service: - port: 2379 - targetPort: 2379 - # selector: - # component: etcd - - ## Configure secure access to the etcd cluster by loading a secret into prometheus and - ## specifying security configuration below. For example, with a secret named etcd-client-cert - ## - ## serviceMonitor: - ## scheme: https - ## insecureSkipVerify: false - ## serverName: localhost - ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca - ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client - ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key - ## - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - scheme: http - insecureSkipVerify: false - serverName: "" - caFile: "" - certFile: "" - keyFile: "" - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. 
- ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - -## Component scraping kube scheduler -## -kubeScheduler: - enabled: true - - ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## If using kubeScheduler.endpoints only the port and targetPort are used - ## - service: - port: 10251 - targetPort: 10251 - # selector: - # component: kube-scheduler - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - ## Enable scraping kube-scheduler over https. - ## Requires proper certs (not self-signed) and delegated authentication/authorization checks - ## - https: false - - ## Skip TLS certificate validation when scraping - insecureSkipVerify: null - - ## Name of the server to use when validating TLS certificate - serverName: null - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - -## Component scraping kube proxy -## -kubeProxy: - enabled: true - - ## If your kube proxy is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - service: - port: 10249 - targetPort: 10249 - # selector: - # k8s-app: kube-proxy - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## Enable scraping kube-proxy over https. - ## Requires proper certs (not self-signed) and delegated authentication/authorization checks - ## - https: false - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - -## Component scraping kube state metrics -## -kubeStateMetrics: - enabled: true - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Configuration for kube-state-metrics subchart -## -kube-state-metrics: - rbac: - create: true - podSecurityPolicy: - enabled: true - -## Deploy node exporter as a daemonset to all nodes -## -nodeExporter: - enabled: true - - ## Use the value configured in prometheus-node-exporter.podLabels - ## - jobLabel: jobLabel - - serviceMonitor: - ## Scrape interval. 
If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## How long until a scrape request times out. If not set, the Prometheus default scape timeout is used. - ## - scrapeTimeout: "" - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - sourceLabels: [__name__] - # separator: ; - # regex: ^node_mountstats_nfs_(event|operations|transport)_.+ - # replacement: $1 - # action: drop - - ## relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Configuration for prometheus-node-exporter subchart -## -prometheus-node-exporter: - podLabels: - ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards - ## - jobLabel: node-exporter - extraArgs: - - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/) - - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$ - -## Manages Prometheus and Alertmanager components -## -prometheusOperator: - enabled: true - - # If true prometheus operator will create and update its CRDs on startup - manageCrds: true - - tlsProxy: - enabled: false - image: - repository: squareup/ghostunnel - tag: v1.5.2 - pullPolicy: IfNotPresent - resources: {} - - ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted - ## rules from making their way into prometheus and potentially preventing the container from starting - admissionWebhooks: - failurePolicy: Fail - enabled: false - ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data. - ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own - ## certs ahead of time if you wish. - ## - patch: - enabled: false - image: - repository: jettech/kube-webhook-certgen - tag: v1.0.0 - pullPolicy: IfNotPresent - resources: {} - ## Provide a priority class name to the webhook patching job - ## - priorityClassName: "" - podAnnotations: {} - nodeSelector: {} - affinity: {} - tolerations: [] - - ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list). - ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration - ## - namespaces: {} - # releaseNamespace: true - # additional: - # - kube-system - - ## Namespaces not to scope the interaction of the Prometheus Operator (deny list). - ## - denyNamespaces: [] - - ## Service account for Alertmanager to use. 
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-  ##
-  serviceAccount:
-    create: true
-    name: ""
-
-  ## Configuration for Prometheus operator service
-  ##
-  service:
-    annotations: {}
-    labels: {}
-    clusterIP: ""
-
-    ## Port to expose on each node
-    ## Only used if service.type is 'NodePort'
-    ##
-    nodePort: 30080
-
-    nodePortTls: 30443
-
-    ## Additional ports to open for Prometheus service
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
-    ##
-    additionalPorts: []
-
-    ## Loadbalancer IP
-    ## Only use if service.type is "loadbalancer"
-    ##
-    loadBalancerIP: ""
-    loadBalancerSourceRanges: []
-
-    ## Service type
-    ## NodePort, ClusterIP, loadbalancer
-    ##
-    type: ClusterIP
-
-    ## List of IP addresses at which the Prometheus server service is available
-    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
-    ##
-    externalIPs: []
-
-  ## Deploy CRDs used by Prometheus Operator.
-  ##
-  createCustomResource: true
-
-  ## Attempt to clean up CRDs created by Prometheus Operator.
-  ##
-  cleanupCustomResource: false
-
-  ## Labels to add to the operator pod
-  ##
-  podLabels: {}
-
-  ## Annotations to add to the operator pod
-  ##
-  podAnnotations: {}
-
-  ## Assign a PriorityClassName to pods if set
-  # priorityClassName: ""
-
-  ## Define Log Format
-  # Use logfmt (default) or json-formatted logging
-  # logFormat: logfmt
-
-  ## Decrease log verbosity to errors only
-  # logLevel: error
-
-  ## If true, the operator will create and maintain a service for scraping kubelets
-  ## ref: https://github.com/coreos/prometheus-operator/blob/master/helm/prometheus-operator/README.md
-  ##
-  kubeletService:
-    enabled: true
-    namespace: kube-system
-
-  ## Create a servicemonitor for the operator
-  ##
-  serviceMonitor:
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-    selfMonitor: true
-
-    ## metric relabel configs to apply to samples before ingestion.
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    # relabel configs to apply to samples before ingestion.
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^(.*)$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-  ## Resource limits & requests
-  ##
-  resources: {}
-  # limits:
-  #   cpu: 200m
-  #   memory: 200Mi
-  # requests:
-  #   cpu: 100m
-  #   memory: 100Mi
-
-  ## Define which Nodes the Pods are scheduled on.
-  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
-  ##
-  nodeSelector: {}
-
-  ## Tolerations for use with node taints
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  # - key: "key"
-  #   operator: "Equal"
-  #   value: "value"
-  #   effect: "NoSchedule"
-
-  ## Assign custom affinity rules to the prometheus operator
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-  ##
-  affinity: {}
-  # nodeAffinity:
-  #   requiredDuringSchedulingIgnoredDuringExecution:
-  #     nodeSelectorTerms:
-  #     - matchExpressions:
-  #       - key: kubernetes.io/e2e-az-name
-  #         operator: In
-  #         values:
-  #         - e2e-az1
-  #         - e2e-az2
-
-  securityContext:
-    runAsNonRoot: true
-    runAsUser: 65534
-
-  ## Prometheus-operator image
-  ##
-  image:
-    repository: quay.io/coreos/prometheus-operator
-    tag: v0.37.0
-    pullPolicy: IfNotPresent
-
-  ## Configmap-reload image to use for reloading configmaps
-  ##
-  configmapReloadImage:
-    repository: quay.io/coreos/configmap-reload
-    tag: v0.0.1
-
-  ## Prometheus-config-reloader image to use for config and rule reloading
-  ##
-  prometheusConfigReloaderImage:
-    repository: quay.io/coreos/prometheus-config-reloader
-    tag: v0.37.0
-
-  ## Set the prometheus config reloader side-car CPU limit
-  ##
-  configReloaderCpu: 100m
-
-  ## Set the prometheus config reloader side-car memory limit
-  ##
-  configReloaderMemory: 25Mi
-
-  ## Hyperkube image to use when cleaning up
-  ##
-  hyperkubeImage:
-    repository: k8s.gcr.io/hyperkube
-    tag: v1.12.1
-    pullPolicy: IfNotPresent
-
-## Deploy a Prometheus instance
-##
-prometheus:
-
-  enabled: true
-
-  ## Annotations for Prometheus
-  ##
-  annotations: {}
-
-  ## Service account for Prometheuses to use.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-  ##
-  serviceAccount:
-    create: true
-    name: ""
-
-  ## Configuration for Prometheus service
-  ##
-  service:
-    annotations: {}
-    labels: {}
-    clusterIP: ""
-
-    ## Port for Prometheus Service to listen on
-    ##
-    port: 9090
-
-    ## To be used with a proxy extraContainer port
-    targetPort: 9090
-
-    ## List of IP addresses at which the Prometheus server service is available
-    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
-    ##
-    externalIPs: []
-
-    ## Port to expose on each node
-    ## Only used if service.type is 'NodePort'
-    ##
-    nodePort: 30090
-
-    ## Loadbalancer IP
-    ## Only use if service.type is "loadbalancer"
-    loadBalancerIP: ""
-    loadBalancerSourceRanges: []
-    ## Service type
-    ##
-    type: ClusterIP
-
-    sessionAffinity: ""
-
-  ## Configuration for creating a separate Service for each statefulset Prometheus replica
-  ##
-  servicePerReplica:
-    enabled: false
-    annotations: {}
-
-    ## Port for Prometheus Service per replica to listen on
-    ##
-    port: 9090
-
-    ## To be used with a proxy extraContainer port
-    targetPort: 9090
-
-    ## Port to expose on each node
-    ## Only used if servicePerReplica.type is 'NodePort'
-    ##
-    nodePort: 30091
-
-    ## Loadbalancer source IP ranges
-    ## Only used if servicePerReplica.type is "loadbalancer"
-    loadBalancerSourceRanges: []
-    ## Service type
-    ##
-    type: ClusterIP
-
-  ## Configure pod disruption budgets for Prometheus
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
-  ## This configuration is immutable once created and will require the PDB to be deleted to be changed
-  ## https://github.com/kubernetes/kubernetes/issues/45398
-  ##
-  podDisruptionBudget:
-    enabled: false
-    minAvailable: 1
-    maxUnavailable: ""
-
-  ingress:
-    enabled: false
-    annotations: {}
-    labels: {}
-
-    ## Hostnames.
-    ## Must be provided if Ingress is enabled.
-    ##
-    # hosts:
-    #   - prometheus.domain.com
-    hosts: []
-
-    ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
-    ##
-    paths: []
-    # - /
-
-    ## TLS configuration for Prometheus Ingress
-    ## Secret must be manually created in the namespace
-    ##
-    tls: []
-    # - secretName: prometheus-general-tls
-    #   hosts:
-    #     - prometheus.example.com
-
-  ## Configuration for creating an Ingress that will map to each Prometheus replica service
-  ## prometheus.servicePerReplica must be enabled
-  ##
-  ingressPerReplica:
-    enabled: false
-    annotations: {}
-    labels: {}
-
-    ## Final form of the hostname for each per replica ingress is
-    ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
-    ##
-    ## Prefix for the per replica ingress that will have `-$replicaNumber`
-    ## appended to the end
-    hostPrefix: ""
-    ## Domain that will be used for the per replica ingress
-    hostDomain: ""
-
-    ## Paths to use for ingress rules
-    ##
-    paths: []
-    # - /
-
-    ## Secret name containing the TLS certificate for Prometheus per replica ingress
-    ## Secret must be manually created in the namespace
-    tlsSecretName: ""
-
-    ## Separated secret for each per replica Ingress. Can be used together with cert-manager
-    ##
-    tlsSecretPerReplica:
-      enabled: false
-      ## Final form of the secret for each per replica ingress is
-      ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
-      ##
-      prefix: "prometheus"
-
-  ## Configure additional options for default pod security policy for Prometheus
-  ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
-  podSecurityPolicy:
-    allowedCapabilities: []
-
-  serviceMonitor:
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-    selfMonitor: true
-
-    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
-    scheme: ""
-
-    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
-    ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
-    tlsConfig: {}
-
-    bearerTokenFile:
-
-    ## metric relabel configs to apply to samples before ingestion.
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    # relabel configs to apply to samples before ingestion.
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^(.*)$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-  ## Settings affecting prometheusSpec
-  ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
-  ##
-  prometheusSpec:
-    ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
-    ##
-    disableCompaction: false
-    ## APIServerConfig
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#apiserverconfig
-    ##
-    apiserverConfig: {}
-
-    ## Interval between consecutive scrapes.
-    ##
-    scrapeInterval: ""
-
-    ## Interval between consecutive evaluations.
-    ##
-    evaluationInterval: ""
-
-    ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
-    ##
-    listenLocal: false
-
-    ## EnableAdminAPI enables Prometheus the administrative HTTP API which includes functionality such as deleting time series.
-    ## This is disabled by default.
-    ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
-    ##
-    enableAdminAPI: false
-
-    ## Image of Prometheus.
-    ##
-    image:
-      repository: quay.io/prometheus/prometheus
-      tag: v2.15.2
-
-    ## Tolerations for use with node taints
-    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-    ##
-    tolerations: []
-    # - key: "key"
-    #   operator: "Equal"
-    #   value: "value"
-    #   effect: "NoSchedule"
-
-    ## Alertmanagers to which alerts will be sent
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints
-    ##
-    ## Default configuration will connect to the alertmanager deployed as part of this release
-    ##
-    alertingEndpoints: []
-    # - name: ""
-    #   namespace: ""
-    #   port: http
-    #   scheme: http
-    #   pathPrefix: ""
-    #   tlsConfig: {}
-    #   bearerTokenFile: ""
-    #   apiVersion: v2
-
-    ## External labels to add to any time series or alerts when communicating with external systems
-    ##
-    externalLabels: {}
-
-    ## Name of the external label used to denote replica name
-    ##
-    replicaExternalLabelName: ""
-
-    ## If true, the Operator won't add the external label used to denote replica name
-    ##
-    replicaExternalLabelNameClear: false
-
-    ## Name of the external label used to denote Prometheus instance name
-    ##
-    prometheusExternalLabelName: ""
-
-    ## If true, the Operator won't add the external label used to denote Prometheus instance name
-    ##
-    prometheusExternalLabelNameClear: false
-
-    ## External URL at which Prometheus will be reachable.
-    ##
-    externalUrl: ""
-
-    ## Define which Nodes the Pods are scheduled on.
-    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
-    ##
-    nodeSelector: {}
-
-    ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
-    ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
-    ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
-    ## with the new list of secrets.
-    ##
-    secrets: []
-
-    ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
-    ## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
-    ##
-    configMaps: []
-
-    ## QuerySpec defines the query command line flags when starting Prometheus.
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#queryspec
-    ##
-    query: {}
-
-    ## Namespaces to be selected for PrometheusRules discovery.
-    ## If nil, select own namespace. Namespaces to be selected for ServiceMonitor discovery.
-    ## See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
-    ##
-    ruleNamespaceSelector: {}
-
-    ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the PrometheusRule resources created
-    ##
-    ruleSelectorNilUsesHelmValues: true
-
-    ## PrometheusRules to be selected for target discovery.
-    ## If {}, select all ServiceMonitors
-    ##
-    ruleSelector: {}
-    ## Example which select all prometheusrules resources
-    ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
-    # ruleSelector:
-    #   matchExpressions:
-    #     - key: prometheus
-    #       operator: In
-    #       values:
-    #         - example-rules
-    #         - example-rules-2
-    #
-    ## Example which select all prometheusrules resources with label "role" set to "example-rules"
-    # ruleSelector:
-    #   matchLabels:
-    #     role: example-rules
-
-    ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the servicemonitors created
-    ##
-    serviceMonitorSelectorNilUsesHelmValues: true
-
-    ## ServiceMonitors to be selected for target discovery.
-    ## If {}, select all ServiceMonitors
-    ##
-    serviceMonitorSelector: {}
-    ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
-    # serviceMonitorSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## Namespaces to be selected for ServiceMonitor discovery.
-    ## See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
-    ##
-    serviceMonitorNamespaceSelector: {}
-
-    ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the podmonitors created
-    ##
-    podMonitorSelectorNilUsesHelmValues: true
-
-    ## PodMonitors to be selected for target discovery.
-    ## If {}, select all PodMonitors
-    ##
-    podMonitorSelector: {}
-    ## Example which selects PodMonitors with label "prometheus" set to "somelabel"
-    # podMonitorSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## Namespaces to be selected for PodMonitor discovery.
-    ## See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
-    ##
-    podMonitorNamespaceSelector: {}
-
-    ## How long to retain metrics
-    ##
-    retention: 10d
-
-    ## Maximum size of metrics
-    ##
-    retentionSize: ""
-
-    ## Enable compression of the write-ahead log using Snappy.
-    ##
-    walCompression: false
-
-    ## If true, the Operator won't process any Prometheus configuration changes
-    ##
-    paused: false
-
-    ## Number of Prometheus replicas desired
-    ##
-    replicas: 1
-
-    ## Log level for Prometheus be configured in
-    ##
-    logLevel: info
-
-    ## Log format for Prometheus be configured in
-    ##
-    logFormat: logfmt
-
-    ## Prefix used to register routes, overriding externalUrl route.
-    ## Useful for proxies that rewrite URLs.
-    ##
-    routePrefix: /
-
-    ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
-    ## Metadata Labels and Annotations gets propagated to the prometheus pods.
-    ##
-    podMetadata: {}
-    # labels:
-    #   app: prometheus
-    #   k8s-app: prometheus
-
-    ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
-    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
-    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
-    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
-    podAntiAffinity: ""
-
-    ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
-    ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
-    ##
-    podAntiAffinityTopologyKey: kubernetes.io/hostname
-
-    ## Assign custom affinity rules to the prometheus instance
-    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-    ##
-    affinity: {}
-    # nodeAffinity:
-    #   requiredDuringSchedulingIgnoredDuringExecution:
-    #     nodeSelectorTerms:
-    #     - matchExpressions:
-    #       - key: kubernetes.io/e2e-az-name
-    #         operator: In
-    #         values:
-    #         - e2e-az1
-    #         - e2e-az2
-
-    ## The remote_read spec configuration for Prometheus.
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotereadspec
-    remoteRead: []
-    # - url: http://remote1/read
-
-    ## The remote_write spec configuration for Prometheus.
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
-    remoteWrite: []
-    # remoteWrite:
-    #   - url: http://thanos-receive:19291/api/v1/receive
-
-    ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature
-    remoteWriteDashboards: false
-
-    ## Resource limits & requests
-    ##
-    resources: {}
-    # requests:
-    #   memory: 400Mi
-
-    ## Prometheus StorageSpec for persistent data
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md
-    ##
-    storageSpec: {}
-    # volumeClaimTemplate:
-    #   spec:
-    #     storageClassName: gluster
-    #     accessModes: ["ReadWriteOnce"]
-    #     resources:
-    #       requests:
-    #         storage: 50Gi
-    #   selector: {}
-
-    ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
-    ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
-    ## as specified in the official Prometheus documentation:
-    ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
-    ## appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility
-    ## to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible
-    ## scrape configs are going to break Prometheus after the upgrade.
-    ##
-    ## The scrape configuraiton example below will find master nodes, provided they have the name .*mst.*, relabel the
-    ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
-    ##
-    additionalScrapeConfigs: []
-    # - job_name: kube-etcd
-    #   kubernetes_sd_configs:
-    #     - role: node
-    #   scheme: https
-    #   tls_config:
-    #     ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
-    #     cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
-    #     key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
-    #   relabel_configs:
-    #   - action: labelmap
-    #     regex: __meta_kubernetes_node_label_(.+)
-    #   - source_labels: [__address__]
-    #     action: replace
-    #     targetLabel: __address__
-    #     regex: ([^:;]+):(\d+)
-    #     replacement: ${1}:2379
-    #   - source_labels: [__meta_kubernetes_node_name]
-    #     action: keep
-    #     regex: .*mst.*
-    #   - source_labels: [__meta_kubernetes_node_name]
-    #     action: replace
-    #     targetLabel: node
-    #     regex: (.*)
-    #     replacement: ${1}
-    #   metric_relabel_configs:
-    #   - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
-    #     action: labeldrop
-
-    ## additionalPrometheusSecretsAnnotations allows to add annotations to the kubernetes secret. This can be useful
-    ## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false'
-    additionalPrometheusSecretsAnnotations: {}
-
-    ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
-    ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#.
-    ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
-    ## As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this
-    ## feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release
-    ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
-    ##
-    additionalAlertManagerConfigs: []
-    # - consul_sd_configs:
-    #   - server: consul.dev.test:8500
-    #     scheme: http
-    #     datacenter: dev
-    #     tag_separator: ','
-    #     services:
-    #       - metrics-prometheus-alertmanager
-
-    ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
-    ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
-    ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
-    ## As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the
-    ## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel
-    ## configs are going to break Prometheus after the upgrade.
-    ##
-    additionalAlertRelabelConfigs: []
-    # - separator: ;
-    #   regex: prometheus_replica
-    #   replacement: $1
-    #   action: labeldrop
-
-    ## SecurityContext holds pod-level security attributes and common container settings.
-    ## This defaults to non root user with uid 1000 and gid 2000.
-    ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md
-    ##
-    securityContext:
-      runAsNonRoot: true
-      runAsUser: 1000
-      fsGroup: 2000
-
-    ## Priority class assigned to the Pods
-    ##
-    priorityClassName: ""
-
-    ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
-    ## This section is experimental, it may change significantly without deprecation notice in any release.
-    ## This is experimental and may change significantly without backward compatibility in any release.
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#thanosspec
-    ##
-    # thanos:
-    #   baseImage: thanosio/thanos
-    #   version: v0.16.0
-    #   objectStorageConfig:
-    #     key: thanos.yaml
-    #     name: thanos-objstore-config
-
-    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
-    ## if using proxy extraContainer update targetPort with proxy container port
-    containers: []
-
-    ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
-    ## (permissions, dir tree) on mounted volumes before starting prometheus
-    initContainers: []
-
-    ## Enable additional scrape configs that are managed externally to this chart. Note that the prometheus
-    ## will fail to provision if the correct secret does not exist.
-    ## This option requires that you are maintaining a secret in the same namespace as Prometheus with
-    ## a name of 'prometheus-operator-prometheus-scrape-confg' and a key of 'additional-scrape-configs.yaml' that
-    ## contains a list of scrape_config's. The name of the secret may vary if you utilize the "fullnameOverride".
-    ## This feature cannot be used in conjunction with the additionalScrapeConfigs attribute (the helm-generated
-    ## secret will overwrite your self-maintained secret).
-    ##
-    ## scrape_config docs: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
-    ## explanation of "confg" typo: https://github.com/helm/charts/issues/13368
-    additionalScrapeConfigsExternal: false
-
-    ## PortName to use for Prometheus.
-    ##
-    portName: "web"
-
-  additionalServiceMonitors: []
-  ## Name of the ServiceMonitor to create
-  ##
-  # - name: ""
-
-  ## Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from
-  ## the chart
-  ##
-  # additionalLabels: {}
-
-  ## Service label for use in assembling a job name of the form