diff --git a/cmd/exporters/prometheus/httpd.go b/cmd/exporters/prometheus/httpd.go index f8f82e451..7d6d7561a 100644 --- a/cmd/exporters/prometheus/httpd.go +++ b/cmd/exporters/prometheus/httpd.go @@ -9,6 +9,7 @@ package prometheus import ( "errors" "fmt" + "github.com/netapp/harvest/v2/cmd/exporters" "log/slog" "net" "net/http" @@ -148,7 +149,7 @@ func (p *Prometheus) ServeMetrics(w http.ResponseWriter, r *http.Request) { // serve our own metadata // notice that some values are always taken from previous session - md, _, _ := p.render(p.Metadata) + md, _, _ := exporters.Render(p.Metadata, p.addMetaTags, p.Params.SortLabels, p.globalPrefix, p.Logger, "") _, err = p.aCache.streamMetrics(w, tagsSeen, md) if err != nil { p.Logger.Error("failed to stream metadata metrics", slogx.Err(err)) diff --git a/cmd/exporters/prometheus/prometheus.go b/cmd/exporters/prometheus/prometheus.go index a4e22f14e..d9f782379 100644 --- a/cmd/exporters/prometheus/prometheus.go +++ b/cmd/exporters/prometheus/prometheus.go @@ -22,9 +22,8 @@ Package Description: package prometheus import ( - "bytes" + "github.com/netapp/harvest/v2/cmd/exporters" "github.com/netapp/harvest/v2/cmd/poller/exporter" - "github.com/netapp/harvest/v2/cmd/poller/plugin/changelog" "github.com/netapp/harvest/v2/pkg/errs" "github.com/netapp/harvest/v2/pkg/matrix" "github.com/netapp/harvest/v2/pkg/set" @@ -32,9 +31,6 @@ import ( "log/slog" "path/filepath" "regexp" - "slices" - "sort" - "strconv" "strings" "time" ) @@ -93,7 +89,7 @@ func (p *Prometheus) Init() error { return err } - p.replacer = newReplacer() + p.replacer = exporters.NewReplacer() if instance, err := p.Metadata.NewInstance("info"); err == nil { instance.SetLabel("task", "info") @@ -212,10 +208,6 @@ func (p *Prometheus) Init() error { return nil } -func newReplacer() *strings.Replacer { - return strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\n", "\\n") -} - // Export - Unlike other Harvest exporters, we don't export data // but put it in cache. The HTTP daemon serves that cache on request. // @@ -239,7 +231,7 @@ func (p *Prometheus) Export(data *matrix.Matrix) (exporter.Stats, error) { // render metrics into Prometheus format start := time.Now() - metrics, stats, metricNames = p.render(data) + metrics, stats, metricNames = exporters.Render(data, p.addMetaTags, p.Params.SortLabels, p.globalPrefix, p.Logger, "") // fix render time for metadata d := time.Since(start) @@ -262,524 +254,3 @@ func (p *Prometheus) Export(data *matrix.Matrix) (exporter.Stats, error) { return stats, nil } - -// Render metrics and labels into the exposition format, as described in -// https://prometheus.io/docs/instrumenting/exposition_formats/ -// -// All metrics are implicitly "Gauge" counters. If requested, we also submit -// HELP and TYPE metadata (see add_meta_tags in config). -// -// Metric name is concatenation of the collector object (e.g. "volume", -// "fcp_lif") + the metric name (e.g. "read_ops" => "volume_read_ops"). -// We do this since the same metrics for different objects can have -// different sets of labels, and Prometheus does not allow this. 
-// -// Example outputs: -// -// volume_read_ops{node="my-node",vol="some_vol"} 2523 -// fcp_lif_read_ops{vserver="nas_svm",port_id="e02"} 771 - -func (p *Prometheus) render(data *matrix.Matrix) ([][]byte, exporter.Stats, *set.Set) { - var ( - rendered [][]byte - tagged *set.Set - labelsToInclude []string - keysToInclude []string - prefix string - err error - joinedKeys string - histograms map[string]*histogram - normalizedLabels map[string][]string // cache of histogram normalized labels - instancesExported uint64 - renderedBytes uint64 - instanceKeysOk bool - buf bytes.Buffer // shared buffer for rendering - ) - - buf.Grow(4096) - globalLabels := make([]string, 0, len(data.GetGlobalLabels())) - normalizedLabels = make(map[string][]string) - metricNames := set.New() - - if p.addMetaTags { - tagged = set.New() - } - - options := data.GetExportOptions() - - if x := options.GetChildS("instance_labels"); x != nil { - labelsToInclude = x.GetAllChildContentS() - } - - if x := options.GetChildS("instance_keys"); x != nil { - keysToInclude = x.GetAllChildContentS() - } - - includeAllLabels := false - requireInstanceKeys := true - - if x := options.GetChildContentS("include_all_labels"); x != "" { - if includeAllLabels, err = strconv.ParseBool(x); err != nil { - p.Logger.Error("parameter: include_all_labels", slogx.Err(err)) - } - } - - if x := options.GetChildContentS("require_instance_keys"); x != "" { - if requireInstanceKeys, err = strconv.ParseBool(x); err != nil { - p.Logger.Error("parameter: require_instance_keys", slogx.Err(err)) - } - } - - if data.Object == "" { - prefix = strings.TrimSuffix(p.globalPrefix, "_") - } else { - prefix = p.globalPrefix + data.Object - } - - for key, value := range data.GetGlobalLabels() { - globalLabels = append(globalLabels, escape(p.replacer, key, value)) - } - - // Count the number of metrics so the rendered slice can be sized without reallocation - numMetrics := 0 - - exportableInstances := 0 - exportableMetrics := 0 - - for _, instance := range data.GetInstances() { - if !instance.IsExportable() { - continue - } - exportableInstances++ - } - - for _, metric := range data.GetMetrics() { - if !metric.IsExportable() { - continue - } - metricNames.Add(prefix + "_" + metric.GetName()) - exportableMetrics++ - } - - numMetrics += exportableInstances * exportableMetrics - if p.addMetaTags { - numMetrics += exportableMetrics * 2 // for help and type - } - - rendered = make([][]byte, 0, numMetrics) - - for _, instance := range data.GetInstances() { - - if !instance.IsExportable() { - continue - } - instancesExported++ - - moreKeys := 0 - if includeAllLabels { - moreKeys = len(instance.GetLabels()) - } - - instanceKeys := make([]string, 0, len(globalLabels)+len(keysToInclude)+moreKeys) - instanceKeys = append(instanceKeys, globalLabels...) - - instanceLabels := make([]string, 0, len(labelsToInclude)) - instanceLabelsSet := make(map[string]struct{}) - - // The ChangeLog plugin tracks metric values and publishes the names of metrics that have changed. - // For example, it might indicate that 'volume_size_total' has been updated. - // If a global prefix for the exporter is defined, we need to amend the metric name with this prefix. 
- if p.globalPrefix != "" && data.Object == changelog.ObjectChangeLog { - if categoryValue, ok := instance.GetLabels()[changelog.Category]; ok { - if categoryValue == changelog.Metric { - if tracked, ok := instance.GetLabels()[changelog.Track]; ok { - instance.GetLabels()[changelog.Track] = p.globalPrefix + tracked - } - } - } - } - - if includeAllLabels { - for label, value := range instance.GetLabels() { - // temporary fix for the rarely happening duplicate labels - // known case is: ZapiPerf -> 7mode -> disk.yaml - // actual cause is the Aggregator plugin, which is adding node as - // instance label (even though it's already a global label for 7modes) - _, ok := data.GetGlobalLabels()[label] - if !ok { - escaped := escape(p.replacer, label, value) - instanceKeys = append(instanceKeys, escaped) - } - } - } else { - for _, key := range keysToInclude { - value := instance.GetLabel(key) - escaped := escape(p.replacer, key, value) - instanceKeys = append(instanceKeys, escaped) - if !instanceKeysOk && value != "" { - instanceKeysOk = true - } - } - - for _, label := range labelsToInclude { - value := instance.GetLabel(label) - kv := escape(p.replacer, label, value) - _, ok := instanceLabelsSet[kv] - if ok { - continue - } - instanceLabelsSet[kv] = struct{}{} - instanceLabels = append(instanceLabels, kv) - } - - // @TODO, probably be strict, and require all keys to be present - if !instanceKeysOk && requireInstanceKeys { - continue - } - - // @TODO, check at least one label is found? - if len(instanceLabels) != 0 { - allLabels := make([]string, 0, len(instanceLabels)+len(instanceKeys)) - allLabels = append(allLabels, instanceLabels...) - // include each instanceKey not already included in the list of labels - for _, instanceKey := range instanceKeys { - _, ok := instanceLabelsSet[instanceKey] - if ok { - continue - } - instanceLabelsSet[instanceKey] = struct{}{} - allLabels = append(allLabels, instanceKey) - } - if p.Params.SortLabels { - sort.Strings(allLabels) - } - - buf.Reset() - - buf.WriteString(prefix) - buf.WriteString("_labels{") - buf.WriteString(strings.Join(allLabels, ",")) - buf.WriteString("} 1.0") - - xbr := buf.Bytes() - labelData := make([]byte, len(xbr)) - copy(labelData, xbr) - - prefixed := prefix + "_labels" - if tagged != nil && !tagged.Has(prefixed) { - tagged.Add(prefixed) - help := "# HELP " + prefixed + " Pseudo-metric for " + data.Object + " labels" - typeT := "# TYPE " + prefixed + " gauge" - rendered = append(rendered, []byte(help), []byte(typeT)) - renderedBytes += uint64(len(help)) + uint64(len(typeT)) - } - rendered = append(rendered, labelData) - renderedBytes += uint64(len(labelData)) - } - } - - if p.Params.SortLabels { - sort.Strings(instanceKeys) - } - - joinedKeys = strings.Join(instanceKeys, ",") - histograms = make(map[string]*histogram) - - for _, metric := range data.GetMetrics() { - - if !metric.IsExportable() { - continue - } - - if value, ok := metric.GetValueString(instance); ok { - - // metric is array, determine if this is a plain array or histogram - if metric.HasLabels() { - if metric.IsHistogram() { - // Metric is histogram. 
Create a new metric to accumulate - // the flattened metrics and export them in order - bucketMetric := data.GetMetric(metric.GetLabel("bucket")) - if bucketMetric == nil { - p.Logger.Debug( - "Unable to find bucket for metric, skip", - slog.String("metric", metric.GetName()), - ) - continue - } - metricIndex := metric.GetLabel("comment") - index, err := strconv.Atoi(metricIndex) - if err != nil { - p.Logger.Error( - "Unable to find index of metric, skip", - slog.String("metric", metric.GetName()), - slog.String("index", metricIndex), - ) - } - histogram := histogramFromBucket(histograms, bucketMetric) - histogram.values[index] = value - continue - } - metricLabels := make([]string, 0, len(metric.GetLabels())) - for k, v := range metric.GetLabels() { - metricLabels = append(metricLabels, escape(p.replacer, k, v)) - } - if p.Params.SortLabels { - sort.Strings(metricLabels) - } - - buf.Reset() - buf.WriteString(prefix) - buf.WriteString("_") - buf.WriteString(metric.GetName()) - buf.WriteString("{") - buf.WriteString(joinedKeys) - buf.WriteString(",") - buf.WriteString(strings.Join(metricLabels, ",")) - buf.WriteString("} ") - buf.WriteString(value) - - xbr := buf.Bytes() - metricLine := make([]byte, len(xbr)) - copy(metricLine, xbr) - - prefixedName := prefix + "_" + metric.GetName() - if tagged != nil && !tagged.Has(prefixedName) { - tagged.Add(prefixedName) - help := "# HELP " + prefixedName + " Metric for " + data.Object - typeT := "# TYPE " + prefixedName + " gauge" - rendered = append(rendered, []byte(help), []byte(typeT)) - renderedBytes += uint64(len(help)) + uint64(len(typeT)) - } - - rendered = append(rendered, metricLine) - renderedBytes += uint64(len(metricLine)) - // scalar metric - } else { - buf.Reset() - - if prefix == "" { - buf.WriteString(metric.GetName()) - buf.WriteString("{") - buf.WriteString(joinedKeys) - buf.WriteString("} ") - buf.WriteString(value) - } else { - buf.WriteString(prefix) - buf.WriteString("_") - buf.WriteString(metric.GetName()) - buf.WriteString("{") - buf.WriteString(joinedKeys) - buf.WriteString("} ") - buf.WriteString(value) - } - xbr := buf.Bytes() - scalarMetric := make([]byte, len(xbr)) - copy(scalarMetric, xbr) - - prefixedName := prefix + "_" + metric.GetName() - if tagged != nil && !tagged.Has(prefixedName) { - tagged.Add(prefixedName) - - buf.Reset() - buf.WriteString("# HELP ") - buf.WriteString(prefixedName) - buf.WriteString(" Metric for ") - buf.WriteString(data.Object) - - xbr := buf.Bytes() - helpB := make([]byte, len(xbr)) - copy(helpB, xbr) - - rendered = append(rendered, helpB) - renderedBytes += uint64(len(helpB)) - - buf.Reset() - buf.WriteString("# TYPE ") - buf.WriteString(prefixedName) - buf.WriteString(" gauge") - - tbr := buf.Bytes() - typeB := make([]byte, len(tbr)) - copy(typeB, tbr) - - rendered = append(rendered, typeB) - renderedBytes += uint64(len(typeB)) - } - - rendered = append(rendered, scalarMetric) - renderedBytes += uint64(len(scalarMetric)) - } - } - } - - // All metrics have been processed and flattened metrics accumulated. Determine which histograms can be - // normalized and exported. 
- for _, h := range histograms { - metric := h.metric - bucketNames := metric.Buckets() - objectMetric := data.Object + "_" + metric.GetName() - _, ok := normalizedLabels[objectMetric] - if !ok { - canNormalize := true - normalizedNames := make([]string, 0, len(*bucketNames)) - // check if the buckets can be normalized and collect normalized names - for _, bucketName := range *bucketNames { - normalized := p.normalizeHistogram(bucketName) - if normalized == "" { - canNormalize = false - break - } - normalizedNames = append(normalizedNames, normalized) - } - if canNormalize { - normalizedLabels[objectMetric] = normalizedNames - } - } - - // Before writing out the histogram, check that every bucket value is non-empty. - // Some bucket values may be empty if certain bucket metrics were skipped in the collector while others were not. - allBucketsHaveValues := true - if slices.Contains(h.values, "") { - allBucketsHaveValues = false - } - if !allBucketsHaveValues { - // Skip rendering this histogram entirely. - continue - } - - prefixedName := prefix + "_" + metric.GetName() - if tagged != nil && !tagged.Has(prefixedName) { - tagged.Add(prefix + "_" + metric.GetName()) - - help := "# HELP " + prefixedName + " Metric for " + data.Object - typeT := "# TYPE " + prefixedName + " histogram" - rendered = append(rendered, []byte(help), []byte(typeT)) - renderedBytes += uint64(len(help)) + uint64(len(typeT)) - } - - normalizedNames, canNormalize := normalizedLabels[objectMetric] - var ( - countMetric string - sumMetric string - ) - if canNormalize { - count, sum := h.computeCountAndSum(normalizedNames) - countMetric = prefix + "_" + metric.GetName() + "_count{" + joinedKeys + "} " + count - sumMetric = prefix + "_" + metric.GetName() + "_sum{" + joinedKeys + "} " + strconv.Itoa(sum) - } - for i, value := range h.values { - bucketName := (*bucketNames)[i] - var x string - if canNormalize { - x = prefix + "_" + metric.GetName() + "_bucket{" + joinedKeys + `,le="` + normalizedNames[i] + `"} ` + value - } else { - x = prefix + "_" + metric.GetName() + "{" + joinedKeys + `,` + escape(p.replacer, "metric", bucketName) + "} " + value - } - rendered = append(rendered, []byte(x)) - renderedBytes += uint64(len(x)) - } - if canNormalize { - rendered = append(rendered, []byte(countMetric), []byte(sumMetric)) - renderedBytes += uint64(len(countMetric)) + uint64(len(sumMetric)) - } - } - } - - // Both memory and disk cache add a newline character after each metric line - // when serving via HTTP (see writeMetric() and writeToDisk()) - renderedBytes += uint64(len(rendered)) // Add 1 byte per line for '\n' - - stats := exporter.Stats{ - InstancesExported: instancesExported, - MetricsExported: uint64(len(rendered)), - RenderedBytes: renderedBytes, - } - - return rendered, stats, metricNames -} - -var numAndUnitRe = regexp.MustCompile(`(\d+)\s*(\w+)`) - -// normalizeHistogram tries to normalize ONTAP values by converting units to multiples of the smallest unit. 
-// When the unit cannot be determined, return an empty string -func (p *Prometheus) normalizeHistogram(ontap string) string { - numAndUnit := ontap - if strings.HasPrefix(ontap, "<") { - numAndUnit = ontap[1:] - } else if strings.HasPrefix(ontap, ">") { - return "+Inf" - } - submatch := numAndUnitRe.FindStringSubmatch(numAndUnit) - if len(submatch) != 3 { - return "" - } - num := submatch[1] - unit := submatch[2] - float, err := strconv.ParseFloat(num, 64) - if err != nil { - return "" - } - var normal float64 - switch unit { - case "us": - return num - case "ms", "msec": - normal = 1_000 * float - case "s", "sec": - normal = 1_000_000 * float - default: - return "" - } - return strconv.FormatFloat(normal, 'f', -1, 64) -} - -func histogramFromBucket(histograms map[string]*histogram, metric *matrix.Metric) *histogram { - h, ok := histograms[metric.GetName()] - if ok { - return h - } - buckets := metric.Buckets() - var capacity int - if buckets != nil { - capacity = len(*buckets) - } - h = &histogram{ - metric: metric, - values: make([]string, capacity), - } - histograms[metric.GetName()] = h - return h -} - -func escape(replacer *strings.Replacer, key string, value string) string { - // See https://prometheus.io/docs/instrumenting/exposition_formats/#comments-help-text-and-type-information - // label_value can be any sequence of UTF-8 characters, but the backslash (\), double-quote ("), - // and line feed (\n) characters have to be escaped as \\, \", and \n, respectively. - - return key + "=" + strconv.Quote(replacer.Replace(value)) -} - -type histogram struct { - metric *matrix.Metric - values []string -} - -func (h *histogram) computeCountAndSum(normalizedNames []string) (string, int) { - // If the buckets are normalizable, iterate through the values to: - // 1) calculate Prometheus's cumulative buckets - // 2) add _count metric - // 3) calculate and add _sum metric - cumValues := make([]string, len(h.values)) - runningTotal := 0 - sum := 0 - for i, value := range h.values { - num, _ := strconv.Atoi(value) - runningTotal += num - cumValues[i] = strconv.Itoa(runningTotal) - normalName := normalizedNames[i] - leValue, _ := strconv.Atoi(normalName) - sum += leValue * num - } - h.values = cumValues - return cumValues[len(cumValues)-1], sum -} diff --git a/cmd/exporters/prometheus/prometheus_test.go b/cmd/exporters/prometheus/prometheus_test.go index 1e2f6f75b..37e709152 100644 --- a/cmd/exporters/prometheus/prometheus_test.go +++ b/cmd/exporters/prometheus/prometheus_test.go @@ -11,6 +11,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/netapp/harvest/v2/assert" + "github.com/netapp/harvest/v2/cmd/exporters" "github.com/netapp/harvest/v2/cmd/poller/exporter" "github.com/netapp/harvest/v2/cmd/poller/options" "github.com/netapp/harvest/v2/pkg/conf" @@ -18,7 +19,7 @@ import ( ) func TestEscape(t *testing.T) { - replacer := newReplacer() + replacer := exporters.NewReplacer() type test struct { key string @@ -35,16 +36,16 @@ func TestEscape(t *testing.T) { for _, tc := range tests { t.Run(tc.want, func(t *testing.T) { - got := escape(replacer, tc.key, tc.value) + got := exporters.Escape(replacer, tc.key, tc.value) assert.Equal(t, got, tc.want) }) } } func BenchmarkEscape(b *testing.B) { - replacer := newReplacer() + replacer := exporters.NewReplacer() for b.Loop() { - escape(replacer, "abc", `a\c"foo"\ndef`) + exporters.Escape(replacer, "abc", `a\c"foo"\ndef`) } } diff --git a/cmd/exporters/utils.go b/cmd/exporters/utils.go new file mode 100644 index 000000000..62218710c --- /dev/null +++ 
b/cmd/exporters/utils.go @@ -0,0 +1,559 @@ +package exporters + +import ( + "bytes" + "github.com/netapp/harvest/v2/cmd/poller/exporter" + "github.com/netapp/harvest/v2/cmd/poller/plugin/changelog" + "github.com/netapp/harvest/v2/pkg/matrix" + "github.com/netapp/harvest/v2/pkg/set" + "github.com/netapp/harvest/v2/pkg/slogx" + "log/slog" + "regexp" + "slices" + "sort" + "strconv" + "strings" +) + +var numAndUnitRe = regexp.MustCompile(`(\d+)\s*(\w+)`) + +type Histogram struct { + Metric *matrix.Metric + Values []string +} + +func Escape(replacer *strings.Replacer, key string, value string) string { + // See https://prometheus.io/docs/instrumenting/exposition_formats/#comments-help-text-and-type-information + // label_value can be any sequence of UTF-8 characters, but the backslash (\), double-quote ("), + // and line feed (\n) characters have to be escaped as \\, \", and \n, respectively. + + return key + "=" + strconv.Quote(replacer.Replace(value)) +} + +func NewReplacer() *strings.Replacer { + return strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\n", "\\n") +} + +func HistogramFromBucket(histograms map[string]*Histogram, metric *matrix.Metric) *Histogram { + h, ok := histograms[metric.GetName()] + if ok { + return h + } + buckets := metric.Buckets() + var capacity int + if buckets != nil { + capacity = len(*buckets) + } + h = &Histogram{ + Metric: metric, + Values: make([]string, capacity), + } + histograms[metric.GetName()] = h + return h +} + +func (h *Histogram) ComputeCountAndSum(normalizedNames []string) (string, int) { + // If the buckets are normalizable, iterate through the values to: + // 1) calculate Prometheus's cumulative buckets + // 2) add _count metric + // 3) calculate and add _sum metric + cumValues := make([]string, len(h.Values)) + runningTotal := 0 + sum := 0 + for i, value := range h.Values { + num, _ := strconv.Atoi(value) + runningTotal += num + cumValues[i] = strconv.Itoa(runningTotal) + normalName := normalizedNames[i] + leValue, _ := strconv.Atoi(normalName) + sum += leValue * num + } + h.Values = cumValues + return cumValues[len(cumValues)-1], sum +} + +// NormalizeHistogram tries to normalize ONTAP values by converting units to multiples of the smallest unit. +// When the unit cannot be determined, return an empty string +func NormalizeHistogram(ontap string) string { + numAndUnit := ontap + if strings.HasPrefix(ontap, "<") { + numAndUnit = ontap[1:] + } else if strings.HasPrefix(ontap, ">") { + return "+Inf" + } + submatch := numAndUnitRe.FindStringSubmatch(numAndUnit) + if len(submatch) != 3 { + return "" + } + num := submatch[1] + unit := submatch[2] + float, err := strconv.ParseFloat(num, 64) + if err != nil { + return "" + } + var normal float64 + switch unit { + case "us": + return num + case "ms", "msec": + normal = 1_000 * float + case "s", "sec": + normal = 1_000_000 * float + default: + return "" + } + return strconv.FormatFloat(normal, 'f', -1, 64) +} + +// Render metrics and labels into the exposition format, as described in +// https://prometheus.io/docs/instrumenting/exposition_formats/ +// +// All metrics are implicitly "Gauge" counters. If requested, we also submit +// HELP and TYPE metadata (see add_meta_tags in config). +// +// Metric name is concatenation of the collector object (e.g. "volume", +// "fcp_lif") + the metric name (e.g. "read_ops" => "volume_read_ops"). +// We do this since the same metrics for different objects can have +// different sets of labels, and Prometheus does not allow this. 
+// +// Example outputs: +// +// volume_read_ops{node="my-node",vol="some_vol"} 2523 +// fcp_lif_read_ops{vserver="nas_svm",port_id="e02"} 771 + +func Render(data *matrix.Matrix, addMetaTags bool, sortLabels bool, globalPrefix string, logger *slog.Logger, timestamp string) ([][]byte, exporter.Stats, *set.Set) { + var ( + rendered [][]byte + tagged *set.Set + labelsToInclude []string + keysToInclude []string + prefix string + err error + joinedKeys string + histograms map[string]*Histogram + normalizedLabels map[string][]string // cache of histogram normalized labels + instancesExported uint64 + renderedBytes uint64 + instanceKeysOk bool + buf bytes.Buffer // shared buffer for rendering + ) + + buf.Grow(4096) + globalLabels := make([]string, 0, len(data.GetGlobalLabels())) + normalizedLabels = make(map[string][]string) + metricNames := set.New() + + replacer := NewReplacer() + + if addMetaTags { + tagged = set.New() + } + + options := data.GetExportOptions() + + if x := options.GetChildS("instance_labels"); x != nil { + labelsToInclude = x.GetAllChildContentS() + } + + if x := options.GetChildS("instance_keys"); x != nil { + keysToInclude = x.GetAllChildContentS() + } + + includeAllLabels := false + requireInstanceKeys := true + + if x := options.GetChildContentS("include_all_labels"); x != "" { + if includeAllLabels, err = strconv.ParseBool(x); err != nil { + logger.Error("parameter: include_all_labels", slogx.Err(err)) + } + } + + if x := options.GetChildContentS("require_instance_keys"); x != "" { + if requireInstanceKeys, err = strconv.ParseBool(x); err != nil { + logger.Error("parameter: require_instance_keys", slogx.Err(err)) + } + } + + if data.Object == "" { + prefix = strings.TrimSuffix(globalPrefix, "_") + } else { + prefix = globalPrefix + data.Object + } + + for key, value := range data.GetGlobalLabels() { + globalLabels = append(globalLabels, Escape(replacer, key, value)) + } + + // Count the number of metrics so the rendered slice can be sized without reallocation + numMetrics := 0 + + exportableInstances := 0 + exportableMetrics := 0 + + for _, instance := range data.GetInstances() { + if !instance.IsExportable() { + continue + } + exportableInstances++ + } + + for _, metric := range data.GetMetrics() { + if !metric.IsExportable() { + continue + } + metricNames.Add(prefix + "_" + metric.GetName()) + exportableMetrics++ + } + + numMetrics += exportableInstances * exportableMetrics + if addMetaTags { + numMetrics += exportableMetrics * 2 // for help and type + } + + rendered = make([][]byte, 0, numMetrics) + + for _, instance := range data.GetInstances() { + + if !instance.IsExportable() { + continue + } + instancesExported++ + + moreKeys := 0 + if includeAllLabels { + moreKeys = len(instance.GetLabels()) + } + + instanceKeys := make([]string, 0, len(globalLabels)+len(keysToInclude)+moreKeys) + instanceKeys = append(instanceKeys, globalLabels...) + + instanceLabels := make([]string, 0, len(labelsToInclude)) + instanceLabelsSet := make(map[string]struct{}) + + // The ChangeLog plugin tracks metric values and publishes the names of metrics that have changed. + // For example, it might indicate that 'volume_size_total' has been updated. + // If a global prefix for the exporter is defined, we need to amend the metric name with this prefix. 
+ if globalPrefix != "" && data.Object == changelog.ObjectChangeLog { + if categoryValue, ok := instance.GetLabels()[changelog.Category]; ok { + if categoryValue == changelog.Metric { + if tracked, ok := instance.GetLabels()[changelog.Track]; ok { + instance.GetLabels()[changelog.Track] = globalPrefix + tracked + } + } + } + } + + if includeAllLabels { + for label, value := range instance.GetLabels() { + // temporary fix for the rarely happening duplicate labels + // known case is: ZapiPerf -> 7mode -> disk.yaml + // actual cause is the Aggregator plugin, which is adding node as + // instance label (even though it's already a global label for 7modes) + _, ok := data.GetGlobalLabels()[label] + if !ok { + escaped := Escape(replacer, label, value) + instanceKeys = append(instanceKeys, escaped) + } + } + } else { + for _, key := range keysToInclude { + value := instance.GetLabel(key) + escaped := Escape(replacer, key, value) + instanceKeys = append(instanceKeys, escaped) + if !instanceKeysOk && value != "" { + instanceKeysOk = true + } + } + + for _, label := range labelsToInclude { + value := instance.GetLabel(label) + kv := Escape(replacer, label, value) + _, ok := instanceLabelsSet[kv] + if ok { + continue + } + instanceLabelsSet[kv] = struct{}{} + instanceLabels = append(instanceLabels, kv) + } + + if !instanceKeysOk && requireInstanceKeys { + continue + } + + if len(instanceLabels) != 0 { + allLabels := make([]string, 0, len(instanceLabels)+len(instanceKeys)) + allLabels = append(allLabels, instanceLabels...) + // include each instanceKey not already included in the list of labels + for _, instanceKey := range instanceKeys { + _, ok := instanceLabelsSet[instanceKey] + if ok { + continue + } + instanceLabelsSet[instanceKey] = struct{}{} + allLabels = append(allLabels, instanceKey) + } + if sortLabels { + sort.Strings(allLabels) + } + + buf.Reset() + + buf.WriteString(prefix) + buf.WriteString("_labels{") + buf.WriteString(strings.Join(allLabels, ",")) + buf.WriteString("} 1.0") + if timestamp != "" { + buf.WriteString(" ") + buf.WriteString(timestamp) + } + + xbr := buf.Bytes() + labelData := make([]byte, len(xbr)) + copy(labelData, xbr) + + prefixed := prefix + "_labels" + if tagged != nil && !tagged.Has(prefixed) { + tagged.Add(prefixed) + help := "# HELP " + prefixed + " Pseudo-metric for " + data.Object + " labels" + typeT := "# TYPE " + prefixed + " gauge" + rendered = append(rendered, []byte(help), []byte(typeT)) + renderedBytes += uint64(len(help)) + uint64(len(typeT)) + } + rendered = append(rendered, labelData) + renderedBytes += uint64(len(labelData)) + } + } + + if sortLabels { + sort.Strings(instanceKeys) + } + + joinedKeys = strings.Join(instanceKeys, ",") + histograms = make(map[string]*Histogram) + + for _, metric := range data.GetMetrics() { + + if !metric.IsExportable() { + continue + } + + if value, ok := metric.GetValueString(instance); ok { + + // metric is array, determine if this is a plain array or histogram + if metric.HasLabels() { + if metric.IsHistogram() { + // Metric is histogram. 
Create a new metric to accumulate + // the flattened metrics and export them in order + bucketMetric := data.GetMetric(metric.GetLabel("bucket")) + if bucketMetric == nil { + logger.Debug( + "Unable to find bucket for metric, skip", + slog.String("metric", metric.GetName()), + ) + continue + } + metricIndex := metric.GetLabel("comment") + index, err := strconv.Atoi(metricIndex) + if err != nil { + logger.Error( + "Unable to find index of metric, skip", + slog.String("metric", metric.GetName()), + slog.String("index", metricIndex), + ) + } + histogram := HistogramFromBucket(histograms, bucketMetric) + histogram.Values[index] = value + continue + } + metricLabels := make([]string, 0, len(metric.GetLabels())) + for k, l := range metric.GetLabels() { + metricLabels = append(metricLabels, Escape(replacer, k, l)) + } + if sortLabels { + sort.Strings(metricLabels) + } + + buf.Reset() + buf.WriteString(prefix) + buf.WriteString("_") + buf.WriteString(metric.GetName()) + buf.WriteString("{") + buf.WriteString(joinedKeys) + buf.WriteString(",") + buf.WriteString(strings.Join(metricLabels, ",")) + buf.WriteString("} ") + buf.WriteString(value) + if timestamp != "" { + buf.WriteString(" ") + buf.WriteString(timestamp) + } + + xbr := buf.Bytes() + metricLine := make([]byte, len(xbr)) + copy(metricLine, xbr) + + prefixedName := prefix + "_" + metric.GetName() + if tagged != nil && !tagged.Has(prefixedName) { + tagged.Add(prefixedName) + help := "# HELP " + prefixedName + " Metric for " + data.Object + typeT := "# TYPE " + prefixedName + " gauge" + rendered = append(rendered, []byte(help), []byte(typeT)) + renderedBytes += uint64(len(help)) + uint64(len(typeT)) + } + + rendered = append(rendered, metricLine) + renderedBytes += uint64(len(metricLine)) + // scalar metric + } else { + buf.Reset() + + if prefix == "" { + buf.WriteString(metric.GetName()) + buf.WriteString("{") + buf.WriteString(joinedKeys) + buf.WriteString("} ") + buf.WriteString(value) + } else { + buf.WriteString(prefix) + buf.WriteString("_") + buf.WriteString(metric.GetName()) + buf.WriteString("{") + buf.WriteString(joinedKeys) + buf.WriteString("} ") + buf.WriteString(value) + } + if timestamp != "" { + buf.WriteString(" ") + buf.WriteString(timestamp) + } + xbr := buf.Bytes() + scalarMetric := make([]byte, len(xbr)) + copy(scalarMetric, xbr) + + prefixedName := prefix + "_" + metric.GetName() + if tagged != nil && !tagged.Has(prefixedName) { + tagged.Add(prefixedName) + + buf.Reset() + buf.WriteString("# HELP ") + buf.WriteString(prefixedName) + buf.WriteString(" Metric for ") + buf.WriteString(data.Object) + + xbr := buf.Bytes() + helpB := make([]byte, len(xbr)) + copy(helpB, xbr) + + rendered = append(rendered, helpB) + renderedBytes += uint64(len(helpB)) + + buf.Reset() + buf.WriteString("# TYPE ") + buf.WriteString(prefixedName) + buf.WriteString(" gauge") + + tbr := buf.Bytes() + typeB := make([]byte, len(tbr)) + copy(typeB, tbr) + + rendered = append(rendered, typeB) + renderedBytes += uint64(len(typeB)) + } + + rendered = append(rendered, scalarMetric) + renderedBytes += uint64(len(scalarMetric)) + } + } + } + + // All metrics have been processed and flattened metrics accumulated. Determine which histograms can be + // normalized and exported. 
+ for _, h := range histograms { + metric := h.Metric + bucketNames := metric.Buckets() + objectMetric := data.Object + "_" + metric.GetName() + _, ok := normalizedLabels[objectMetric] + if !ok { + canNormalize := true + normalizedNames := make([]string, 0, len(*bucketNames)) + // check if the buckets can be normalized and collect normalized names + for _, bucketName := range *bucketNames { + normalized := NormalizeHistogram(bucketName) + if normalized == "" { + canNormalize = false + break + } + normalizedNames = append(normalizedNames, normalized) + } + if canNormalize { + normalizedLabels[objectMetric] = normalizedNames + } + } + + // Before writing out the histogram, check that every bucket value is non-empty. + // Some bucket values may be empty if certain bucket metrics were skipped in the collector while others were not. + allBucketsHaveValues := true + if slices.Contains(h.Values, "") { + allBucketsHaveValues = false + } + if !allBucketsHaveValues { + // Skip rendering this histogram entirely. + continue + } + + prefixedName := prefix + "_" + metric.GetName() + if tagged != nil && !tagged.Has(prefixedName) { + tagged.Add(prefix + "_" + metric.GetName()) + + help := "# HELP " + prefixedName + " Metric for " + data.Object + typeT := "# TYPE " + prefixedName + " histogram" + rendered = append(rendered, []byte(help), []byte(typeT)) + renderedBytes += uint64(len(help)) + uint64(len(typeT)) + } + + normalizedNames, canNormalize := normalizedLabels[objectMetric] + var ( + countMetric string + sumMetric string + ) + if canNormalize { + count, sum := h.ComputeCountAndSum(normalizedNames) + countMetric = prefix + "_" + metric.GetName() + "_count{" + joinedKeys + "} " + count + sumMetric = prefix + "_" + metric.GetName() + "_sum{" + joinedKeys + "} " + strconv.Itoa(sum) + } + for i, value := range h.Values { + bucketName := (*bucketNames)[i] + var x string + if canNormalize { + x = prefix + "_" + metric.GetName() + "_bucket{" + joinedKeys + `,le="` + normalizedNames[i] + `"} ` + value + if timestamp != "" { + x += " " + timestamp + } + } else { + x = prefix + "_" + metric.GetName() + "{" + joinedKeys + `,` + Escape(replacer, "metric", bucketName) + "} " + value + if timestamp != "" { + x += " " + timestamp + } + } + rendered = append(rendered, []byte(x)) + renderedBytes += uint64(len(x)) + } + if canNormalize { + rendered = append(rendered, []byte(countMetric), []byte(sumMetric)) + renderedBytes += uint64(len(countMetric)) + uint64(len(sumMetric)) + } + } + } + + // Both memory and disk cache add a newline character after each metric line + // when serving via HTTP (see writeMetric() and writeToDisk()) + renderedBytes += uint64(len(rendered)) // Add 1 byte per line for '\n' + + stats := exporter.Stats{ + InstancesExported: instancesExported, + MetricsExported: uint64(len(rendered)), + RenderedBytes: renderedBytes, + } + + return rendered, stats, metricNames +} diff --git a/cmd/exporters/victoriametrics/victoriametrics.go b/cmd/exporters/victoriametrics/victoriametrics.go new file mode 100644 index 000000000..b60bbad5a --- /dev/null +++ b/cmd/exporters/victoriametrics/victoriametrics.go @@ -0,0 +1,223 @@ +package victoriametrics + +import ( + "bytes" + "fmt" + "github.com/netapp/harvest/v2/cmd/exporters" + "github.com/netapp/harvest/v2/cmd/poller/exporter" + "github.com/netapp/harvest/v2/pkg/errs" + "github.com/netapp/harvest/v2/pkg/matrix" + "github.com/netapp/harvest/v2/pkg/requests" + "github.com/netapp/harvest/v2/pkg/slogx" + "io" + "log/slog" + "net/http" + "strconv" + "strings" + "sync" 
+	"time"
+)
+
+const (
+	defaultPort          = 8428
+	defaultTimeout       = 5
+	defaultAPIVersion    = "1"
+	globalPrefix         = ""
+	expectedResponseCode = 204
+)
+
+type VictoriaMetrics struct {
+	*exporter.AbstractExporter
+	client       *http.Client
+	url          string
+	addMetaTags  bool
+	globalPrefix string
+	bufferPool   *sync.Pool
+}
+
+func New(abc *exporter.AbstractExporter) exporter.Exporter {
+	return &VictoriaMetrics{AbstractExporter: abc}
+}
+
+func (v *VictoriaMetrics) Init() error {
+
+	if err := v.InitAbc(); err != nil {
+		return err
+	}
+
+	// Initialize the buffer pool
+	v.bufferPool = &sync.Pool{
+		New: func() any {
+			return new(bytes.Buffer)
+		},
+	}
+
+	var (
+		url, addr, version *string
+		port               *int
+	)
+
+	if instance, err := v.Metadata.NewInstance("http"); err == nil {
+		instance.SetLabel("task", "http")
+	} else {
+		return err
+	}
+
+	if instance, err := v.Metadata.NewInstance("info"); err == nil {
+		instance.SetLabel("task", "info")
+	} else {
+		return err
+	}
+
+	if x := v.Params.GlobalPrefix; x != nil {
+		v.Logger.Debug("use global prefix", slog.String("prefix", *x))
+		v.globalPrefix = *x
+		if !strings.HasSuffix(v.globalPrefix, "_") {
+			v.globalPrefix += "_"
+		}
+	} else {
+		v.globalPrefix = globalPrefix
+	}
+
+	// Check the required/optional params.
+	// The user should provide either url or addr.
+	// url is expected to be the full write URL, including the api/v1/import/prometheus path;
+	// when url is defined, addr and port are ignored.
+
+	// addr is expected to include the host only (no port);
+	// when addr is defined, port is required.
+
+	dbEndpoint := "addr"
+	if url = v.Params.URL; url != nil {
+		v.url = *url
+		dbEndpoint = "url"
+	} else {
+		if addr = v.Params.Addr; addr == nil {
+			v.Logger.Error("missing url or addr")
+			return errs.New(errs.ErrMissingParam, "url or addr")
+		}
+		if port = v.Params.Port; port == nil {
+			v.Logger.Debug("using default port", slog.Int("default", defaultPort))
+			defPort := defaultPort
+			port = &defPort
+		}
+		if version = v.Params.Version; version == nil {
+			defVersion := defaultAPIVersion
+			version = &defVersion
+		}
+		v.Logger.Debug("using api version", slog.String("version", *version))
+
+		//goland:noinspection HttpUrlsUsage
+		urlToUse := "http://" + *addr + ":" + strconv.Itoa(*port)
+		url = &urlToUse
+		v.url = fmt.Sprintf("%s/api/v%s/import/prometheus", *url, *version)
+	}
+
+	// timeout parameter
+	timeout := time.Duration(defaultTimeout) * time.Second
+	if ct := v.Params.ClientTimeout; ct != nil {
+		if t, err := strconv.Atoi(*ct); err == nil {
+			timeout = time.Duration(t) * time.Second
+		} else {
+			v.Logger.Warn(
+				"invalid client_timeout, using default",
+				slog.String("client_timeout", *ct),
+				slog.Int("default", defaultTimeout),
+			)
+		}
+	} else {
+		v.Logger.Debug("using default client_timeout", slog.Int("default", defaultTimeout))
+	}
+
+	v.Logger.Debug("initializing exporter", slog.String("endpoint", dbEndpoint), slog.String("url", v.url))
+
+	// construct the HTTP client
+	v.client = &http.Client{Timeout: timeout}
+
+	return nil
+}
+
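+// Export renders the matrix into the Prometheus exposition format, stamping
+// each sample with the current time, and pushes the result to VictoriaMetrics.
+// The exporter's own metadata metrics are rendered and pushed as well.
+// Unlike the Prometheus exporter, nothing is cached: every call results in an
+// HTTP POST to the configured write URL (skipped in test mode).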
+func (v *VictoriaMetrics) Export(data *matrix.Matrix) (exporter.Stats, error) {
+
+	var (
+		metrics [][]byte
+		err     error
+		s       time.Time
+		stats   exporter.Stats
+	)
+
+	v.Lock()
+	defer v.Unlock()
+
+	s = time.Now()
+
+	// Exposition-format timestamps are milliseconds since the epoch.
+	// To backfill historical data, use a timestamp in the past, e.g. time.Now().Add(-24 * time.Hour)
+	timestamp := strconv.FormatInt(time.Now().UnixMilli(), 10)
+
+	// render metrics into the Prometheus exposition format with a timestamp
+	metrics, stats, _ = exporters.Render(data, v.addMetaTags, v.Params.SortLabels, v.globalPrefix, v.Logger, timestamp)
+
+	// record render time in metadata
+	if err = v.Metadata.LazyAddValueInt64("time", "render", time.Since(s).Microseconds()); err != nil {
+		v.Logger.Error("metadata render time", slogx.Err(err))
+	}
+	// in test mode, don't emit metrics
+	if v.Options.IsTest {
+		return stats, nil
+		// otherwise, do the actual export: send to the DB
+	} else if err = v.Emit(metrics); err != nil {
+		return stats, fmt.Errorf("unable to emit object: %s, uuid: %s, err=%w", data.Object, data.UUID, err)
+	}
+
+	v.Logger.Debug(
+		"exported",
+		slog.String("object", data.Object),
+		slog.String("uuid", data.UUID),
+		slog.Int("numMetric", len(metrics)),
+	)
+
+	// update metadata
+	if err = v.Metadata.LazySetValueInt64("time", "export", time.Since(s).Microseconds()); err != nil {
+		v.Logger.Error("metadata export time", slogx.Err(err))
+	}
+
+	// render metadata metrics into the Prometheus exposition format with a timestamp;
+	// discard the metadata stats so the caller receives the stats for data
+	metrics, _, _ = exporters.Render(v.Metadata, v.addMetaTags, v.Params.SortLabels, v.globalPrefix, v.Logger, timestamp)
+	if err = v.Emit(metrics); err != nil {
+		v.Logger.Error("emit metadata", slogx.Err(err))
+	}
+
+	return stats, nil
+}
+
+func (v *VictoriaMetrics) Emit(data [][]byte) error {
+	var buffer *bytes.Buffer
+	var request *http.Request
+	var response *http.Response
+	var err error
+
+	buffer = v.bufferPool.Get().(*bytes.Buffer)
+	buffer.Reset()
+	_, _ = buffer.Write(bytes.Join(data, []byte("\n")))
+
+	defer v.bufferPool.Put(buffer)
+
+	if request, err = requests.New("POST", v.url, buffer); err != nil {
+		return err
+	}
+
+	if response, err = v.client.Do(request); err != nil {
+		return err
+	}
+
+	//goland:noinspection GoUnhandledErrorResult
+	defer response.Body.Close()
+	if response.StatusCode != expectedResponseCode {
+		body, err := io.ReadAll(response.Body)
+		if err != nil {
+			return errs.New(errs.ErrAPIResponse, err.Error())
+		}
+		return fmt.Errorf("%w: %s", errs.ErrAPIRequestRejected, string(body))
+	}
+	return nil
+}
diff --git a/cmd/exporters/victoriametrics/victoriametrics_test.go b/cmd/exporters/victoriametrics/victoriametrics_test.go
new file mode 100644
index 000000000..7f9de5c15
--- /dev/null
+++ b/cmd/exporters/victoriametrics/victoriametrics_test.go
@@ -0,0 +1,86 @@
+package victoriametrics
+
+import (
+	"github.com/netapp/harvest/v2/assert"
+	"github.com/netapp/harvest/v2/cmd/poller/exporter"
+	"github.com/netapp/harvest/v2/cmd/poller/options"
+	"github.com/netapp/harvest/v2/pkg/conf"
+	"github.com/netapp/harvest/v2/pkg/matrix"
+	"testing"
+)
+
+func setupVictoriaMetrics(t *testing.T, exporterName string) *VictoriaMetrics {
+	opts := options.New()
+	opts.IsTest = true
+
+	_, err := conf.LoadHarvestConfig("../../tools/doctor/testdata/testConfig.yml")
+	assert.Nil(t, err)
+	e, ok := conf.Config.Exporters[exporterName]
+	assert.True(t, ok)
+
+	victoriametrics := &VictoriaMetrics{AbstractExporter: exporter.New("VictoriaMetrics", exporterName, opts, e, nil)}
+	err = victoriametrics.Init()
+	assert.Nil(t, err)
+
+	return victoriametrics
+}
+
+func TestAddrParameter(t *testing.T) {
+	expectedURL := "http://localhost:8428/api/v1/import/prometheus"
+	exporterName := "victoriametrics-test-addr"
+	victoriametrics := setupVictoriaMetrics(t, exporterName)
+
+	assert.Equal(t, victoriametrics.url, expectedURL)
+}
+
+func TestUrlParameter(t *testing.T) {
+	expectedURL := "http://localhost:8428/api/v1/import/prometheus"
+	exporterName := "victoriametrics-test-url"
+	victoriametrics := setupVictoriaMetrics(t, exporterName)
+
+	assert.Equal(t, victoriametrics.url, expectedURL)
+}
+
+// test that the addr, port, and version parameters are handled properly when constructing the server URL
+func TestVersionParameter(t *testing.T) {
+	expectedURL := "http://localhost:8400/api/v4/import/prometheus"
+	exporterName := "victoriametrics-test-version"
+	victoriametrics := setupVictoriaMetrics(t, exporterName)
+
+	assert.Equal(t, victoriametrics.url, expectedURL)
+}
+
+// test that the `addr` field is ignored when the `url` field is also set
+func TestUrlIgnores(t *testing.T) {
+	expectedURL := "https://example.com:8428/api/v1/import/prometheus"
+	exporterName := "victoriametrics-with-url"
+	victoriametrics := setupVictoriaMetrics(t, exporterName)
+
+	assert.Equal(t, victoriametrics.url, expectedURL)
+}
+
+// test rendering and export of a small matrix
+func TestExportDebug(t *testing.T) {
+	exporterName := "victoriametrics-test-url"
+	victoriametrics := setupVictoriaMetrics(t, exporterName)
+
+	// matrix with fake data
+	data := matrix.New("test_exporter", "vm_test_data", "vm_test_data")
+	data.SetExportOptions(matrix.DefaultExportOptions())
+
+	// add a metric
+	m, err := data.NewMetricInt64("test_metric")
+	assert.Nil(t, err)
+
+	// add an instance
+	i, err := data.NewInstance("test_instance")
+	assert.Nil(t, err)
+	i.SetLabel("test_label", "test_label_value")
+
+	// add numeric data
+	m.SetValueInt64(i, 42)
+
+	// render and export the data
+	_, err = victoriametrics.Export(data)
+	assert.Nil(t, err)
+}
diff --git a/cmd/poller/poller.go b/cmd/poller/poller.go
index 2a850fd5a..3c3c03b5b 100644
--- a/cmd/poller/poller.go
+++ b/cmd/poller/poller.go
@@ -32,6 +32,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"github.com/netapp/harvest/v2/cmd/exporters/victoriametrics"
 	"io"
 	"log/slog"
 	"math"
@@ -1117,6 +1118,8 @@ func (p *Poller) loadExporter(name string) exporter.Exporter {
 		exp = prometheus.New(absExp)
 	case "InfluxDB":
 		exp = influxdb.New(absExp)
+	case "VictoriaMetrics":
+		exp = victoriametrics.New(absExp)
 	default:
 		logger.Error("no exporter of name:type", slog.String("name", name), slog.String("type", class))
 		return nil
diff --git a/cmd/tools/doctor/doctor.go b/cmd/tools/doctor/doctor.go
index 66b334e7c..f24b5217a 100644
--- a/cmd/tools/doctor/doctor.go
+++ b/cmd/tools/doctor/doctor.go
@@ -403,7 +403,7 @@ func checkExporterTypes(config conf.HarvestConfig) validation {
 		if exporter.Type == "" {
 			continue
 		}
-		if exporter.Type == "Prometheus" || exporter.Type == "InfluxDB" {
+		if exporter.Type == "Prometheus" || exporter.Type == "InfluxDB" || exporter.Type == "VictoriaMetrics" {
 			continue
 		}
 		invalidTypes[name] = exporter.Type
diff --git a/cmd/tools/doctor/testdata/testConfig.yml b/cmd/tools/doctor/testdata/testConfig.yml
index 8e5827cbf..a2ce60d48 100644
--- a/cmd/tools/doctor/testdata/testConfig.yml
+++ b/cmd/tools/doctor/testdata/testConfig.yml
@@ -66,6 +66,21 @@ Exporters:
     exporter: Foo1
   foo2:
     exporter: Foo2
+  victoriametrics-test-addr:
+    exporter: VictoriaMetrics
+    addr: localhost
+  victoriametrics-test-url:
+    exporter: VictoriaMetrics
+    url: http://localhost:8428/api/v1/import/prometheus
+  victoriametrics-test-version:
+    exporter: VictoriaMetrics
+    addr: localhost
+    version: 4
+    port: 8400
+  victoriametrics-with-url:
+    exporter: VictoriaMetrics
+    addr: localhost
+    url: https://example.com:8428/api/v1/import/prometheus

 Defaults:
   collectors:
diff --git a/docs/concepts.md b/docs/concepts.md
index 7e21e7666..e4c71b3f3 100644
--- a/docs/concepts.md
+++ b/docs/concepts.md
@@ -80,6 +80,15 @@ None of the [pollers](#poller) know anything about Prometheus. That's because Pr
 
 - [InfluxDB Exporter](influxdb-exporter.md)
 
+## VictoriaMetrics
+
+[VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/) is an open-source time-series database. Existing Harvest dashboards built for Prometheus also work with VictoriaMetrics. Unlike the Prometheus exporter, which is scraped, Harvest's VictoriaMetrics exporter pushes metrics from the poller to VictoriaMetrics in the Prometheus exposition format. The exporter is compatible with VictoriaMetrics v1.129.1.
+
+
+**More information:**
+
+- [VictoriaMetrics Exporter](victoriametrics-exporter.md)
+
 ## Dashboards
 
 Harvest ships with a set of [Grafana](https://grafana.com/) dashboards that are primarily designed to work with Prometheus. The dashboards are located in the `grafana/dashboards` directory. Harvest does not include Grafana, only the dashboards for it. Grafana must be installed separately via Docker, NAbox, or other means.
diff --git a/docs/configure-harvest-basic.md b/docs/configure-harvest-basic.md
index 30415f136..45d0915dc 100644
--- a/docs/configure-harvest-basic.md
+++ b/docs/configure-harvest-basic.md
@@ -58,6 +58,8 @@ data to a database, NOT the names used to refer to the actual databases.
 
 ### [InfluxDB Exporter](influxdb-exporter.md)
 
+### [VictoriaMetrics Exporter](victoriametrics-exporter.md)
+
 ## Tools
 
 This section is optional. You can uncomment the `grafana_api_token` key and add your Grafana API token so `harvest` does
diff --git a/docs/victoriametrics-exporter.md b/docs/victoriametrics-exporter.md
new file mode 100644
index 000000000..adede7da1
--- /dev/null
+++ b/docs/victoriametrics-exporter.md
@@ -0,0 +1,54 @@
+# VictoriaMetrics Exporter
+
+???+ note "VictoriaMetrics Install"
+
+    The information below describes how to set up Harvest's VictoriaMetrics exporter.
+    If you need help installing or setting up VictoriaMetrics, check
+    out [their documentation](https://docs.victoriametrics.com/victoriametrics/).
+
+## Overview
+
+The VictoriaMetrics Exporter formats metrics in the [Prometheus exposition format](https://prometheus.io/docs/instrumenting/exposition_formats/) and pushes them to VictoriaMetrics.
+The Exporter is compatible with VictoriaMetrics v1.129.1.
+
+## Parameters
+
+An overview of all parameters is provided below. Exactly one of `url` or `addr` must be provided.
+If `addr` is specified, it should be a valid TCP address or hostname of the VictoriaMetrics server and should not
+include the scheme or port.
+
+> `addr` only works with HTTP. If you need to use HTTPS, you should use `url` instead.
+
+If `url` is specified, you must include all arguments in the URL;
+Harvest does no additional processing and uses exactly what you specify
+(e.g. `url: http://localhost:8428/api/v1/import/prometheus`).
+When using `url`, the `addr` and `port` fields are ignored.
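+For example, the following `addr`-based configuration (a sketch; the exporter name `vm-example` is arbitrary)
+resolves to the write URL `http://localhost:8428/api/v1/import/prometheus`, the same URL the `url` form would
+specify directly, mirroring how the exporter's `Init` assembles the URL from `addr`, `port`, and `version`:
+
+```yaml
+Exporters:
+  vm-example:
+    exporter: VictoriaMetrics
+    addr: localhost
+    port: 8428
+    version: 1
+```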
+
+| parameter        | type             | description                                                           | default |
+|------------------|------------------|-----------------------------------------------------------------------|---------|
+| `url`            | string           | full write URL, e.g. `SCHEME://HOST[:PORT]/api/v1/import/prometheus`  |         |
+| `addr`           | string           | address of the database, format: `HOST` (HTTP only)                   |         |
+| `port`           | int, optional    | port of the database                                                   | `8428`  |
+| `version`        | string, optional | API version used in the write URL; only applies with `addr`           | `1`     |
+| `client_timeout` | int, optional    | client timeout in seconds                                              | `5`     |
+
+### Example
+
+Snippet from `harvest.yml` using `addr` (supports HTTP only):
+
+```yaml
+Exporters:
+  my_victoriametrics:
+    exporter: VictoriaMetrics
+    addr: localhost
+```
+
+Snippet from `harvest.yml` using `url` (supports both HTTP and HTTPS):
+
+```yaml
+Exporters:
+  victoriametrics2:
+    exporter: VictoriaMetrics
+    url: http://localhost:8428/api/v1/import/prometheus
+```
diff --git a/mkdocs.yml b/mkdocs.yml
index cb928f75b..5ae57ed3c 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -30,6 +30,7 @@ nav:
   - Configure Exporters:
       - 'Prometheus': 'prometheus-exporter.md'
       - 'InfluxDB': 'influxdb-exporter.md'
+      - 'VictoriaMetrics': 'victoriametrics-exporter.md'
   - Configure Grafana: 'configure-grafana.md'
   - Configure Collectors:
      - 'ZAPI': 'configure-zapi.md'
diff --git a/pkg/tree/tree_test.go b/pkg/tree/tree_test.go
index 882896ef2..8e28471ba 100644
--- a/pkg/tree/tree_test.go
+++ b/pkg/tree/tree_test.go
@@ -118,7 +118,7 @@ func TestHarvestConfigImportYaml(t *testing.T) {
 		}
 	}
 
-	want = 13
+	want = 17
 	got = 0
 	if exporters := template.GetChildS("Exporters"); exporters != nil {
 		for range exporters.GetChildren() {