123 changes: 17 additions & 106 deletions cmd/exporters/prometheus/prometheus.go
@@ -23,6 +23,7 @@ package prometheus

import (
"bytes"
"github.com/netapp/harvest/v2/cmd/exporters"
"github.com/netapp/harvest/v2/cmd/poller/exporter"
"github.com/netapp/harvest/v2/cmd/poller/plugin/changelog"
"github.com/netapp/harvest/v2/pkg/errs"
@@ -76,7 +77,7 @@ func (p *Prometheus) Init() error {
return err
}

p.replacer = newReplacer()
p.replacer = exporters.NewReplacer()

if instance, err := p.Metadata.NewInstance("info"); err == nil {
instance.SetLabel("task", "info")
@@ -192,10 +193,6 @@ func (p *Prometheus) Init() error {
return nil
}

func newReplacer() *strings.Replacer {
return strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\n", "\\n")
}

// Export - Unlike other Harvest exporters, we don't export data
// but put it in cache. The HTTP daemon serves that cache on request.
//
@@ -270,7 +267,7 @@ func (p *Prometheus) render(data *matrix.Matrix) ([][]byte, exporter.Stats) {
prefix string
err error
joinedKeys string
histograms map[string]*histogram
histograms map[string]*exporters.Histogram
normalizedLabels map[string][]string // cache of histogram normalized labels
instancesExported uint64
renderedBytes uint64
@@ -318,7 +315,7 @@ func (p *Prometheus) render(data *matrix.Matrix) ([][]byte, exporter.Stats) {
}

for key, value := range data.GetGlobalLabels() {
globalLabels = append(globalLabels, escape(p.replacer, key, value))
globalLabels = append(globalLabels, exporters.Escape(p.replacer, key, value))
}

// Count the number of metrics so the rendered slice can be sized without reallocation
@@ -387,14 +384,14 @@ func (p *Prometheus) render(data *matrix.Matrix) ([][]byte, exporter.Stats) {
// instance label (even though it's already a global label for 7modes)
_, ok := data.GetGlobalLabels()[label]
if !ok {
escaped := escape(p.replacer, label, value)
escaped := exporters.Escape(p.replacer, label, value)
instanceKeys = append(instanceKeys, escaped)
}
}
} else {
for _, key := range keysToInclude {
value := instance.GetLabel(key)
escaped := escape(p.replacer, key, value)
escaped := exporters.Escape(p.replacer, key, value)
instanceKeys = append(instanceKeys, escaped)
if !instanceKeysOk && value != "" {
instanceKeysOk = true
@@ -403,7 +400,7 @@ func (p *Prometheus) render(data *matrix.Matrix) ([][]byte, exporter.Stats) {

for _, label := range labelsToInclude {
value := instance.GetLabel(label)
kv := escape(p.replacer, label, value)
kv := exporters.Escape(p.replacer, label, value)
_, ok := instanceLabelsSet[kv]
if ok {
continue
@@ -463,7 +460,7 @@ func (p *Prometheus) render(data *matrix.Matrix) ([][]byte, exporter.Stats) {
}

joinedKeys = strings.Join(instanceKeys, ",")
histograms = make(map[string]*histogram)
histograms = make(map[string]*exporters.Histogram)

for _, metric := range data.GetMetrics() {

@@ -495,13 +492,13 @@ func (p *Prometheus) render(data *matrix.Matrix) ([][]byte, exporter.Stats) {
slog.String("index", metricIndex),
)
}
histogram := histogramFromBucket(histograms, bucketMetric)
histogram.values[index] = value
histogram := exporters.HistogramFromBucket(histograms, bucketMetric)
histogram.Values[index] = value
continue
}
metricLabels := make([]string, 0, len(metric.GetLabels()))
for k, v := range metric.GetLabels() {
metricLabels = append(metricLabels, escape(p.replacer, k, v))
metricLabels = append(metricLabels, exporters.Escape(p.replacer, k, v))
}
if p.Params.SortLabels {
sort.Strings(metricLabels)
@@ -581,7 +578,7 @@ func (p *Prometheus) render(data *matrix.Matrix) ([][]byte, exporter.Stats) {
// All metrics have been processed and flattened metrics accumulated. Determine which histograms can be
// normalized and exported.
for _, h := range histograms {
metric := h.metric
metric := h.Metric
bucketNames := metric.Buckets()
objectMetric := data.Object + "_" + metric.GetName()
_, ok := normalizedLabels[objectMetric]
@@ -590,7 +587,7 @@ func (p *Prometheus) render(data *matrix.Matrix) ([][]byte, exporter.Stats) {
normalizedNames := make([]string, 0, len(*bucketNames))
// check if the buckets can be normalized and collect normalized names
for _, bucketName := range *bucketNames {
normalized := p.normalizeHistogram(bucketName)
normalized := exporters.NormalizeHistogram(bucketName)
if normalized == "" {
canNormalize = false
break
@@ -605,7 +602,7 @@ func (p *Prometheus) render(data *matrix.Matrix) ([][]byte, exporter.Stats) {
// Before writing out the histogram, check that every bucket value is non-empty.
// Some bucket values may be empty if certain bucket metrics were skipped in the collector while others were not.
allBucketsHaveValues := true
if slices.Contains(h.values, "") {
if slices.Contains(h.Values, "") {
allBucketsHaveValues = false
}
if !allBucketsHaveValues {
@@ -629,17 +626,17 @@ func (p *Prometheus) render(data *matrix.Matrix) ([][]byte, exporter.Stats) {
sumMetric string
)
if canNormalize {
count, sum := h.computeCountAndSum(normalizedNames)
count, sum := h.ComputeCountAndSum(normalizedNames)
countMetric = prefix + "_" + metric.GetName() + "_count{" + joinedKeys + "} " + count
sumMetric = prefix + "_" + metric.GetName() + "_sum{" + joinedKeys + "} " + strconv.Itoa(sum)
}
for i, value := range h.values {
for i, value := range h.Values {
bucketName := (*bucketNames)[i]
var x string
if canNormalize {
x = prefix + "_" + metric.GetName() + "_bucket{" + joinedKeys + `,le="` + normalizedNames[i] + `"} ` + value
} else {
x = prefix + "_" + metric.GetName() + "{" + joinedKeys + `,` + escape(p.replacer, "metric", bucketName) + "} " + value
x = prefix + "_" + metric.GetName() + "{" + joinedKeys + `,` + exporters.Escape(p.replacer, "metric", bucketName) + "} " + value
}
rendered = append(rendered, []byte(x))
renderedBytes += uint64(len(x))
@@ -659,89 +656,3 @@ func (p *Prometheus) render(data *matrix.Matrix) ([][]byte, exporter.Stats) {

return rendered, stats
}

var numAndUnitRe = regexp.MustCompile(`(\d+)\s*(\w+)`)

// normalizeHistogram tries to normalize ONTAP values by converting units to multiples of the smallest unit.
// When the unit cannot be determined, return an empty string
func (p *Prometheus) normalizeHistogram(ontap string) string {
numAndUnit := ontap
if strings.HasPrefix(ontap, "<") {
numAndUnit = ontap[1:]
} else if strings.HasPrefix(ontap, ">") {
return "+Inf"
}
submatch := numAndUnitRe.FindStringSubmatch(numAndUnit)
if len(submatch) != 3 {
return ""
}
num := submatch[1]
unit := submatch[2]
float, err := strconv.ParseFloat(num, 64)
if err != nil {
return ""
}
var normal float64
switch unit {
case "us":
return num
case "ms", "msec":
normal = 1_000 * float
case "s", "sec":
normal = 1_000_000 * float
default:
return ""
}
return strconv.FormatFloat(normal, 'f', -1, 64)
}

func histogramFromBucket(histograms map[string]*histogram, metric *matrix.Metric) *histogram {
h, ok := histograms[metric.GetName()]
if ok {
return h
}
buckets := metric.Buckets()
var capacity int
if buckets != nil {
capacity = len(*buckets)
}
h = &histogram{
metric: metric,
values: make([]string, capacity),
}
histograms[metric.GetName()] = h
return h
}

func escape(replacer *strings.Replacer, key string, value string) string {
// See https://prometheus.io/docs/instrumenting/exposition_formats/#comments-help-text-and-type-information
// label_value can be any sequence of UTF-8 characters, but the backslash (\), double-quote ("),
// and line feed (\n) characters have to be escaped as \\, \", and \n, respectively.

return key + "=" + strconv.Quote(replacer.Replace(value))
}

type histogram struct {
metric *matrix.Metric
values []string
}

func (h *histogram) computeCountAndSum(normalizedNames []string) (string, int) {
// If the buckets are normalizable, iterate through the values to:
// 1) calculate Prometheus's cumulative buckets
// 2) add _count metric
// 3) calculate and add _sum metric
cumValues := make([]string, len(h.values))
runningTotal := 0
sum := 0
for i, value := range h.values {
num, _ := strconv.Atoi(value)
runningTotal += num
cumValues[i] = strconv.Itoa(runningTotal)
normalName := normalizedNames[i]
leValue, _ := strconv.Atoi(normalName)
sum += leValue * num
}
h.values = cumValues
return cumValues[len(cumValues)-1], sum
}
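
For context on the histogram rendering above: when every bucket normalizes, render emits standard Prometheus _bucket/_count/_sum lines built from the cumulative values returned by ComputeCountAndSum. A hypothetical sample of the rendered output (the metric and label names are illustrative, not taken from this PR); with per-bucket counts 2, 3, 2 and normalized bounds 1000, 4000, +Inf, the cumulative values are 2, 5, 7 and the sum is 1000*2 + 4000*3 = 14000. Note that, as written, the +Inf bucket contributes zero to _sum because strconv.Atoi("+Inf") fails:

volume_read_latency_bucket{node="node_1",le="1000"} 2
volume_read_latency_bucket{node="node_1",le="4000"} 5
volume_read_latency_bucket{node="node_1",le="+Inf"} 7
volume_read_latency_count{node="node_1"} 7
volume_read_latency_sum{node="node_1"} 14000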
9 changes: 5 additions & 4 deletions cmd/exporters/prometheus/prometheus_test.go
@@ -7,6 +7,7 @@ package prometheus
import (
"github.com/google/go-cmp/cmp"
"github.com/netapp/harvest/v2/assert"
"github.com/netapp/harvest/v2/cmd/exporters"
"github.com/netapp/harvest/v2/cmd/poller/exporter"
"github.com/netapp/harvest/v2/cmd/poller/options"
"github.com/netapp/harvest/v2/pkg/conf"
@@ -48,7 +49,7 @@ some_other_metric{node="node_3"} 0.0
}

func TestEscape(t *testing.T) {
replacer := newReplacer()
replacer := exporters.NewReplacer()

type test struct {
key string
@@ -65,16 +66,16 @@ func TestEscape(t *testing.T) {

for _, tc := range tests {
t.Run(tc.want, func(t *testing.T) {
got := escape(replacer, tc.key, tc.value)
got := exporters.Escape(replacer, tc.key, tc.value)
assert.Equal(t, got, tc.want)
})
}
}

func BenchmarkEscape(b *testing.B) {
replacer := newReplacer()
replacer := exporters.NewReplacer()
for b.Loop() {
escape(replacer, "abc", `a\c"foo"\ndef`)
exporters.Escape(replacer, "abc", `a\c"foo"\ndef`)
}
}

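Since NormalizeHistogram also moves into the shared exporters package, a table-driven test in the same style as TestEscape could cover the unit conversions. A minimal sketch, not part of this PR (the test name and cases are illustrative):

package prometheus

import (
	"testing"

	"github.com/netapp/harvest/v2/assert"
	"github.com/netapp/harvest/v2/cmd/exporters"
)

func TestNormalizeHistogram(t *testing.T) {
	type test struct {
		ontap string
		want  string
	}
	tests := []test{
		{ontap: "<4us", want: "4"},      // base unit is microseconds
		{ontap: "<8ms", want: "8000"},   // 8 ms -> 8000 us
		{ontap: "<2s", want: "2000000"}, // 2 s -> 2,000,000 us
		{ontap: ">1s", want: "+Inf"},    // open-ended bucket
		{ontap: "other", want: ""},      // unknown unit cannot be normalized
	}
	for _, tc := range tests {
		t.Run(tc.ontap, func(t *testing.T) {
			got := exporters.NormalizeHistogram(tc.ontap)
			assert.Equal(t, got, tc.want)
		})
	}
}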
98 changes: 98 additions & 0 deletions cmd/exporters/utils.go
@@ -0,0 +1,98 @@
package exporters

import (
"github.com/netapp/harvest/v2/pkg/matrix"
"regexp"
"strconv"
"strings"
)

var numAndUnitRe = regexp.MustCompile(`(\d+)\s*(\w+)`)

type Histogram struct {
Metric *matrix.Metric
Values []string
}

func Escape(replacer *strings.Replacer, key string, value string) string {
// See https://prometheus.io/docs/instrumenting/exposition_formats/#comments-help-text-and-type-information
// label_value can be any sequence of UTF-8 characters, but the backslash (\), double-quote ("),
// and line feed (\n) characters have to be escaped as \\, \", and \n, respectively.

return key + "=" + strconv.Quote(replacer.Replace(value))
}

func NewReplacer() *strings.Replacer {
return strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\n", "\\n")
}

func HistogramFromBucket(histograms map[string]*Histogram, metric *matrix.Metric) *Histogram {
h, ok := histograms[metric.GetName()]
if ok {
return h
}
buckets := metric.Buckets()
var capacity int
if buckets != nil {
capacity = len(*buckets)
}
h = &Histogram{
Metric: metric,
Values: make([]string, capacity),
}
histograms[metric.GetName()] = h
return h
}

func (h *Histogram) ComputeCountAndSum(normalizedNames []string) (string, int) {
// If the buckets are normalizable, iterate through the values to:
// 1) calculate Prometheus's cumulative buckets
// 2) add _count metric
// 3) calculate and add _sum metric
cumValues := make([]string, len(h.Values))
runningTotal := 0
sum := 0
for i, value := range h.Values {
num, _ := strconv.Atoi(value)
runningTotal += num
cumValues[i] = strconv.Itoa(runningTotal)
normalName := normalizedNames[i]
leValue, _ := strconv.Atoi(normalName)
sum += leValue * num
}
h.Values = cumValues
return cumValues[len(cumValues)-1], sum
}

// NormalizeHistogram tries to normalize ONTAP values by converting units to multiples of the smallest unit.
// When the unit cannot be determined, return an empty string
func NormalizeHistogram(ontap string) string {
numAndUnit := ontap
if strings.HasPrefix(ontap, "<") {
numAndUnit = ontap[1:]
} else if strings.HasPrefix(ontap, ">") {
return "+Inf"
}
submatch := numAndUnitRe.FindStringSubmatch(numAndUnit)
if len(submatch) != 3 {
return ""
}
num := submatch[1]
unit := submatch[2]
float, err := strconv.ParseFloat(num, 64)
if err != nil {
return ""
}
var normal float64
switch unit {
case "us":
return num
case "ms", "msec":
normal = 1_000 * float
case "s", "sec":
normal = 1_000_000 * float
default:
return ""
}
return strconv.FormatFloat(normal, 'f', -1, 64)
}
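
Taken together, the new helpers compose as follows; a minimal usage sketch (values and label names are illustrative, assuming the import path shown above):

package main

import (
	"fmt"

	"github.com/netapp/harvest/v2/cmd/exporters"
)

func main() {
	// Escape renders a key="value" pair with Prometheus label escaping applied.
	replacer := exporters.NewReplacer()
	fmt.Println(exporters.Escape(replacer, "node", "node_1")) // node="node_1"

	// NormalizeHistogram converts ONTAP bucket names to microseconds;
	// ">" buckets become +Inf and unknown units yield "".
	for _, b := range []string{"<4us", "<8ms", ">1s", "other"} {
		fmt.Printf("%s -> %q\n", b, exporters.NormalizeHistogram(b))
	}

	// ComputeCountAndSum rewrites per-bucket counts as cumulative values and
	// returns the histogram's _count and _sum.
	h := &exporters.Histogram{Values: []string{"2", "3"}}
	count, sum := h.ComputeCountAndSum([]string{"1000", "4000"})
	fmt.Println(count, sum, h.Values) // 5 14000 [2 5]
}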