diff --git a/examples/alerts/gun.go b/examples/alerts/gun.go
index 4dee28e0..a31c2fd0 100644
--- a/examples/alerts/gun.go
+++ b/examples/alerts/gun.go
@@ -20,16 +20,16 @@ func NewExampleHTTPGun(target string) *ExampleGun {
 }
 
 // Call implements example gun call, assertions on response bodies should be done here
-func (m *ExampleGun) Call(l *wasp.Generator) *wasp.CallResult {
+func (m *ExampleGun) Call(l *wasp.Generator) *wasp.Response {
 	var result map[string]interface{}
 	r, err := m.client.R().
 		SetResult(&result).
 		Get(m.target)
 	if err != nil {
-		return &wasp.CallResult{Data: result, Error: err.Error()}
+		return &wasp.Response{Data: result, Error: err.Error()}
 	}
 	if r.Status() != "200 OK" {
-		return &wasp.CallResult{Data: result, Error: "not 200", Failed: true}
+		return &wasp.Response{Data: result, Error: "not 200", Failed: true}
 	}
-	return &wasp.CallResult{Data: result}
+	return &wasp.Response{Data: result}
 }
diff --git a/examples/cluster/gun.go b/examples/cluster/gun.go
index 4dee28e0..a31c2fd0 100644
--- a/examples/cluster/gun.go
+++ b/examples/cluster/gun.go
@@ -20,16 +20,16 @@ func NewExampleHTTPGun(target string) *ExampleGun {
 }
 
 // Call implements example gun call, assertions on response bodies should be done here
-func (m *ExampleGun) Call(l *wasp.Generator) *wasp.CallResult {
+func (m *ExampleGun) Call(l *wasp.Generator) *wasp.Response {
 	var result map[string]interface{}
 	r, err := m.client.R().
 		SetResult(&result).
 		Get(m.target)
 	if err != nil {
-		return &wasp.CallResult{Data: result, Error: err.Error()}
+		return &wasp.Response{Data: result, Error: err.Error()}
 	}
 	if r.Status() != "200 OK" {
-		return &wasp.CallResult{Data: result, Error: "not 200", Failed: true}
+		return &wasp.Response{Data: result, Error: "not 200", Failed: true}
 	}
-	return &wasp.CallResult{Data: result}
+	return &wasp.Response{Data: result}
 }
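
The change is a pure rename: wasp.CallResult becomes wasp.Response, and the Data, Error, and Failed fields carry over unchanged, so call sites migrate mechanically (gofmt -r 'wasp.CallResult -> wasp.Response' -w . covers a whole tree). A minimal, self-contained sketch of a gun written against the renamed type follows; SketchGun and the httptest wiring are illustrative, and only the wasp.Response usage mirrors the diff.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/go-resty/resty/v2"
	"github.com/smartcontractkit/wasp"
)

// SketchGun is a hypothetical gun shaped like the ExampleGun above.
type SketchGun struct {
	client *resty.Client
	target string
}

// Call fires one request; per the comment in the examples, assertions on
// the response body belong here.
func (g *SketchGun) Call(_ *wasp.Generator) *wasp.Response {
	var body map[string]interface{}
	r, err := g.client.R().
		SetResult(&body).
		Get(g.target)
	if err != nil {
		return &wasp.Response{Data: body, Error: err.Error(), Failed: true}
	}
	if r.Status() != "200 OK" {
		return &wasp.Response{Data: body, Error: "not 200", Failed: true}
	}
	return &wasp.Response{Data: body}
}

func main() {
	// Throwaway target so the sketch runs standalone.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, `{"ok":true}`)
	}))
	defer srv.Close()

	gun := &SketchGun{client: resty.New(), target: srv.URL}
	res := gun.Call(nil)
	fmt.Printf("failed=%v err=%q data=%v\n", res.Failed, res.Error, res.Data)
}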
diff --git a/examples/profiles/gun.go b/examples/profiles/gun.go
index 372e12e8..b6af41e1 100644
--- a/examples/profiles/gun.go
+++ b/examples/profiles/gun.go
@@ -20,16 +20,16 @@ func NewExampleHTTPGun(target string) *ExampleGun {
 }
 
 // Call implements example gun call, assertions on response bodies should be done here
-func (m *ExampleGun) Call(l *wasp.Generator) *wasp.CallResult {
+func (m *ExampleGun) Call(l *wasp.Generator) *wasp.Response {
 	var result map[string]interface{}
 	r, err := m.client.R().
 		SetResult(&result).
 		Get(m.target)
 	if err != nil {
-		return &wasp.CallResult{Data: result, Error: err.Error()}
+		return &wasp.Response{Data: result, Error: err.Error()}
 	}
 	if r.Status() != "200 OK" {
-		return &wasp.CallResult{Data: result, Error: "not 200"}
+		return &wasp.Response{Data: result, Error: "not 200"}
 	}
-	return &wasp.CallResult{Data: result}
+	return &wasp.Response{Data: result}
 }
diff --git a/examples/profiles/vu.go b/examples/profiles/vu.go
index e8d85eba..50cbd40d 100644
--- a/examples/profiles/vu.go
+++ b/examples/profiles/vu.go
@@ -5,6 +5,7 @@ import (
 	"time"
 
 	"fmt"
+
 	"github.com/smartcontractkit/wasp"
 	"nhooyr.io/websocket"
 	"nhooyr.io/websocket/wsjson"
@@ -56,10 +57,10 @@ func (m *WSVirtualUser) Call(l *wasp.Generator) {
 	err := wsjson.Read(context.Background(), m.conn, &v)
 	if err != nil {
 		l.Log.Error().Err(err).Msg("failed read ws msg from vu")
-		l.ResponsesChan <- &wasp.CallResult{StartedAt: &startedAt, Data: v, Error: fmt.Sprintf("read error: %s", err.Error())}
+		l.ResponsesChan <- &wasp.Response{StartedAt: &startedAt, Data: v, Error: fmt.Sprintf("read error: %s", err.Error())}
 		return
 	}
-	l.ResponsesChan <- &wasp.CallResult{StartedAt: &startedAt, Data: v}
+	l.ResponsesChan <- &wasp.Response{StartedAt: &startedAt, Data: v}
 }
 
 func (m *WSVirtualUser) Stop(_ *wasp.Generator) {
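
Unlike a gun, a virtual user does not return its result: as the hunks above show, Call stamps StartedAt itself and pushes a *wasp.Response into the generator's ResponsesChan. A compact sketch of that pattern, with the websocket read swapped for a hypothetical doRead helper (SketchVU and doRead are illustrative; the channel and logging calls mirror the diff):

package vusketch

import (
	"time"

	"github.com/smartcontractkit/wasp"
)

// SketchVU is a hypothetical virtual user; doRead stands in for the
// wsjson.Read call in the examples.
type SketchVU struct{}

func (m *SketchVU) doRead() (map[string]interface{}, error) {
	return map[string]interface{}{"msg": "pong"}, nil
}

// Call performs one VU iteration: the VU, not the generator, records the
// start time and reports the outcome over ResponsesChan.
func (m *SketchVU) Call(l *wasp.Generator) {
	startedAt := time.Now()
	v, err := m.doRead()
	if err != nil {
		l.Log.Error().Err(err).Msg("failed read msg from vu")
		l.ResponsesChan <- &wasp.Response{StartedAt: &startedAt, Error: err.Error(), Failed: true}
		return
	}
	l.ResponsesChan <- &wasp.Response{StartedAt: &startedAt, Data: v}
}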
diff --git a/examples/simple_rps/gun.go b/examples/simple_rps/gun.go
index 372e12e8..b6af41e1 100644
--- a/examples/simple_rps/gun.go
+++ b/examples/simple_rps/gun.go
@@ -20,16 +20,16 @@ func NewExampleHTTPGun(target string) *ExampleGun {
 }
 
 // Call implements example gun call, assertions on response bodies should be done here
-func (m *ExampleGun) Call(l *wasp.Generator) *wasp.CallResult {
+func (m *ExampleGun) Call(l *wasp.Generator) *wasp.Response {
 	var result map[string]interface{}
 	r, err := m.client.R().
 		SetResult(&result).
 		Get(m.target)
 	if err != nil {
-		return &wasp.CallResult{Data: result, Error: err.Error()}
+		return &wasp.Response{Data: result, Error: err.Error()}
 	}
 	if r.Status() != "200 OK" {
-		return &wasp.CallResult{Data: result, Error: "not 200"}
+		return &wasp.Response{Data: result, Error: "not 200"}
 	}
-	return &wasp.CallResult{Data: result}
+	return &wasp.Response{Data: result}
 }
diff --git a/examples/simple_vu/vu.go b/examples/simple_vu/vu.go
index 21db6e0c..23a428ef 100644
--- a/examples/simple_vu/vu.go
+++ b/examples/simple_vu/vu.go
@@ -54,10 +54,10 @@ func (m *WSVirtualUser) Call(l *wasp.Generator) {
 	err := wsjson.Read(context.Background(), m.conn, &v)
 	if err != nil {
 		l.Log.Error().Err(err).Msg("failed read ws msg from vu")
-		l.ResponsesChan <- &wasp.CallResult{StartedAt: &startedAt, Error: err.Error(), Failed: true}
+		l.ResponsesChan <- &wasp.Response{StartedAt: &startedAt, Error: err.Error(), Failed: true}
 		return
 	}
-	l.ResponsesChan <- &wasp.CallResult{StartedAt: &startedAt, Data: v}
+	l.ResponsesChan <- &wasp.Response{StartedAt: &startedAt, Data: v}
 }
 
 func (m *WSVirtualUser) Stop(_ *wasp.Generator) {
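
None of the generator wiring changes in this diff; the rename stops at the result type. For orientation, here is a hedged sketch of how such a gun is typically plugged into a generator. The Config fields and the wasp.RPS and wasp.Plain helpers are recalled from wasp's README around this version and should be treated as assumptions, since none of them appear in the diff.

package main

import (
	"net/http"
	"net/http/httptest"
	"time"

	"github.com/go-resty/resty/v2"
	"github.com/smartcontractkit/wasp"
)

// tinyGun is a stripped-down variant of the gun sketched earlier.
type tinyGun struct {
	client *resty.Client
	target string
}

func (g *tinyGun) Call(_ *wasp.Generator) *wasp.Response {
	if _, err := g.client.R().Get(g.target); err != nil {
		return &wasp.Response{Error: err.Error(), Failed: true}
	}
	return &wasp.Response{}
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {}))
	defer srv.Close()

	// LoadType, Schedule, and Gun are assumed Config fields (see note above).
	gen, err := wasp.NewGenerator(&wasp.Config{
		LoadType: wasp.RPS,
		Schedule: wasp.Plain(5, 10*time.Second), // flat 5 RPS for 10s
		Gun:      &tinyGun{client: resty.New(), target: srv.URL},
	})
	if err != nil {
		panic(err)
	}
	gen.Run(true) // block until the schedule completes
}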
diff --git a/go.mod b/go.mod
index dde19a3e..c3634c7b 100644
--- a/go.mod
+++ b/go.mod
@@ -7,21 +7,21 @@ require (
 	github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8
 	github.com/gin-gonic/gin v1.8.1
 	github.com/go-resty/resty/v2 v2.7.0
-	github.com/google/uuid v1.3.0
-	github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2
-	github.com/grafana/loki v1.6.2-0.20231017135925-990ac685e6a6
+	github.com/google/uuid v1.3.1
+	github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f
+	// loki main 12/15/2023
+	github.com/grafana/loki v1.6.2-0.20231215164305-b51b7d7b5503
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/common v0.42.0
+	github.com/prometheus/common v0.44.0
 	github.com/pyroscope-io/client v0.6.0
 	github.com/rs/zerolog v1.29.0
-	github.com/stretchr/testify v1.8.3
+	github.com/stretchr/testify v1.8.4
 	go.uber.org/ratelimit v0.2.0
-	// google.golang.org/grpc version 1.58.x and above are no longer compatible with loki at 990ac685e6a6
-	google.golang.org/grpc v1.57.2 // indirect
-	k8s.io/api v0.26.2
-	k8s.io/apimachinery v0.26.2
-	k8s.io/client-go v0.26.2
+	google.golang.org/grpc v1.59.0 // indirect
+	k8s.io/api v0.28.1
+	k8s.io/apimachinery v0.28.1
+	k8s.io/client-go v0.28.1
 	nhooyr.io/websocket v1.8.7
 )
@@ -32,37 +32,37 @@ require (
 	github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
-	github.com/aws/aws-sdk-go v1.44.217 // indirect
+	github.com/aws/aws-sdk-go v1.44.321 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee // indirect
+	github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/dennwc/varint v1.0.0 // indirect
 	github.com/dustin/go-humanize v1.0.1 // indirect
 	github.com/edsrzf/mmap-go v1.1.0 // indirect
-	github.com/emicklei/go-restful/v3 v3.10.1 // indirect
-	github.com/fatih/color v1.14.1 // indirect
+	github.com/emicklei/go-restful/v3 v3.10.2 // indirect
+	github.com/fatih/color v1.15.0 // indirect
 	github.com/felixge/httpsnoop v1.0.3 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/gin-contrib/sse v0.1.0 // indirect
 	github.com/go-kit/log v0.2.1 // indirect
 	github.com/go-logfmt/logfmt v0.6.0 // indirect
-	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/logr v1.2.4 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.21.4 // indirect
-	github.com/go-openapi/errors v0.20.3 // indirect
-	github.com/go-openapi/jsonpointer v0.19.6 // indirect
+	github.com/go-openapi/errors v0.20.4 // indirect
+	github.com/go-openapi/jsonpointer v0.20.0 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/loads v0.21.2 // indirect
-	github.com/go-openapi/spec v0.20.8 // indirect
-	github.com/go-openapi/strfmt v0.21.3 // indirect
-	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/go-openapi/spec v0.20.9 // indirect
+	github.com/go-openapi/strfmt v0.21.7 // indirect
+	github.com/go-openapi/swag v0.22.4 // indirect
 	github.com/go-openapi/validate v0.22.1 // indirect
-	github.com/go-playground/locales v0.14.0 // indirect
-	github.com/go-playground/universal-translator v0.18.0 // indirect
-	github.com/go-playground/validator/v10 v10.11.1 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/go-playground/validator/v10 v10.11.2 // indirect
 	github.com/goccy/go-json v0.9.11 // indirect
 	github.com/gogo/googleapis v1.4.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
@@ -70,18 +70,17 @@ require (
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
 	github.com/google/btree v1.1.2 // indirect
-	github.com/google/gnostic v0.6.9 // indirect
 	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/gosimple/slug v1.13.1 // indirect
 	github.com/gosimple/unidecode v1.0.1 // indirect
-	github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765 // indirect
+	github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608 // indirect
 	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
-	github.com/hashicorp/consul/api v1.20.0 // indirect
+	github.com/hashicorp/consul/api v1.25.1 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
-	github.com/hashicorp/go-hclog v1.4.0 // indirect
+	github.com/hashicorp/go-hclog v1.5.0 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/hashicorp/go-msgpack v0.5.5 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
@@ -90,19 +89,19 @@ require (
 	github.com/hashicorp/golang-lru v0.6.0 // indirect
 	github.com/hashicorp/memberlist v0.5.0 // indirect
 	github.com/hashicorp/serf v0.10.1 // indirect
-	github.com/imdario/mergo v0.3.13 // indirect
+	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/julienschmidt/httprouter v1.3.0 // indirect
-	github.com/klauspost/compress v1.16.3 // indirect
+	github.com/klauspost/compress v1.16.7 // indirect
 	github.com/leodido/go-urn v1.2.1 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.17 // indirect
+	github.com/mattn/go-isatty v0.0.19 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-	github.com/miekg/dns v1.1.51 // indirect
+	github.com/miekg/dns v1.1.55 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -115,59 +114,86 @@ require (
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/pelletier/go-toml/v2 v2.0.6 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/alertmanager v0.25.0 // indirect
-	github.com/prometheus/client_golang v1.14.0 // indirect
-	github.com/prometheus/client_model v0.3.0 // indirect
+	github.com/prometheus/alertmanager v0.26.0 // indirect
+	github.com/prometheus/client_golang v1.16.0 // indirect
+	github.com/prometheus/client_model v0.4.0 // indirect
 	github.com/prometheus/common/sigv4 v0.1.0 // indirect
-	github.com/prometheus/exporter-toolkit v0.9.1 // indirect
-	github.com/prometheus/procfs v0.9.0 // indirect
-	github.com/prometheus/prometheus v0.43.1-0.20230327151049-211ae4f1f0a2 // indirect
+	github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 // indirect
+	github.com/prometheus/procfs v0.11.0 // indirect
+	github.com/prometheus/prometheus v0.47.2-0.20231010075449-4b9c19fe5510 // indirect
 	github.com/pyroscope-io/godeltaprof v0.1.2 // indirect
 	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
-	github.com/sercand/kuberesolver/v4 v4.0.0 // indirect
-	github.com/sirupsen/logrus v1.9.0 // indirect
+	github.com/soheilhy/cmux v0.1.5 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stretchr/objx v0.5.0 // indirect
 	github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect
 	github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
 	github.com/ugorji/go/codec v1.2.7 // indirect
-	github.com/weaveworks/common v0.0.0-20230411130259-f7d83a041205 // indirect
-	github.com/weaveworks/promrus v1.2.0 // indirect
 	go.etcd.io/etcd/api/v3 v3.5.7 // indirect
 	go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
 	go.etcd.io/etcd/client/v3 v3.5.7 // indirect
-	go.mongodb.org/mongo-driver v1.11.2 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 // indirect
-	go.opentelemetry.io/otel v1.14.0 // indirect
-	go.opentelemetry.io/otel/metric v0.37.0 // indirect
-	go.opentelemetry.io/otel/trace v1.14.0 // indirect
-	go.uber.org/atomic v1.10.0 // indirect
+	go.mongodb.org/mongo-driver v1.12.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect
+	go.opentelemetry.io/otel v1.18.0 // indirect
+	go.opentelemetry.io/otel/metric v1.18.0 // indirect
+	go.opentelemetry.io/otel/trace v1.18.0 // indirect
+	go.uber.org/atomic v1.11.0 // indirect
 	go.uber.org/goleak v1.2.1 // indirect
-	go.uber.org/multierr v1.9.0 // indirect
+	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.21.0 // indirect
 	golang.org/x/crypto v0.14.0 // indirect
-	golang.org/x/exp v0.0.0-20230307190834-24139beb5833 // indirect
-	golang.org/x/mod v0.9.0 // indirect
+	golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b // indirect
+	golang.org/x/mod v0.12.0 // indirect
 	golang.org/x/net v0.17.0 // indirect
-	golang.org/x/oauth2 v0.7.0 // indirect
-	golang.org/x/sync v0.1.0 // indirect
+	golang.org/x/oauth2 v0.11.0 // indirect
+	golang.org/x/sync v0.3.0 // indirect
 	golang.org/x/sys v0.13.0 // indirect
 	golang.org/x/term v0.13.0 // indirect
 	golang.org/x/text v0.13.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
-	golang.org/x/tools v0.7.0 // indirect
+	golang.org/x/tools v0.11.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
+	google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/klog/v2 v2.90.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d // indirect
-	k8s.io/utils v0.0.0-20230308161112-d77c459e9343 // indirect
+	k8s.io/klog/v2 v2.100.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
+	k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect
 	sigs.k8s.io/yaml v1.3.0 // indirect
 )
+
+require (
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
+	github.com/Masterminds/goutils v1.1.1 // indirect
+	github.com/Masterminds/semver/v3 v3.2.0 // indirect
+	github.com/Masterminds/sprig/v3 v3.2.3 // indirect
+	github.com/buger/jsonparser v1.1.1 // indirect
+	github.com/cespare/xxhash v1.1.0 // indirect
+	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+	github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect
+	github.com/go-redis/redis/v8 v8.11.5 // indirect
+	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
+	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 // indirect
+	github.com/huandu/xstrings v1.3.3 // indirect
+	github.com/kylelemons/godebug v1.1.0 // indirect
+	github.com/mitchellh/copystructure v1.0.0 // indirect
+	github.com/mitchellh/reflectwalk v1.0.1 // indirect
+	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
+	github.com/sercand/kuberesolver/v5 v5.1.1 // indirect
+	github.com/shopspring/decimal v1.2.0 // indirect
+	github.com/sony/gobreaker v0.5.0 // indirect
+	github.com/spf13/cast v1.3.1 // indirect
+	go.opentelemetry.io/collector/pdata v1.0.0-rcv0015 // indirect
+	go.opentelemetry.io/collector/semconv v0.81.0 // indirect
+	go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect
+)
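
The substance of the go.mod change: loki moves to main as of 12/15/2023 (b51b7d7b5503), which lifts the old incompatibility, so google.golang.org/grpc advances from the pinned v1.57.2 to v1.59.0 and the pin comment is dropped, with the k8s.io, prometheus, and opentelemetry modules bumped in lockstep. To confirm what MVS actually selects into a built binary, go version -m <binary> prints the full list; the same data is available programmatically from the standard library, as in this sketch:

package main

import (
	"fmt"
	"runtime/debug"
)

// Print the resolved versions of the two modules the old pin comment was
// about, as embedded in the running binary's build info.
func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info embedded in this binary")
		return
	}
	for _, dep := range info.Deps {
		switch dep.Path {
		case "google.golang.org/grpc", "github.com/grafana/loki":
			fmt.Println(dep.Path, dep.Version)
		}
	}
}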
diff --git a/go.sum b/go.sum
index 3d86be57..61358d3d 100644
--- a/go.sum
+++ b/go.sum
[go.sum hunks omitted: machine-generated checksum churn mirroring the go.mod changes above. Hash entries are added for the new and bumped modules (the Azure SDK packages, aws-sdk-go v1.44.321, the Masterminds helpers, miniredis, and so on), and a large block of stale transitive entries, mostly cloud.google.com/go/* pins, is removed.]
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -470,80 +135,60 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc 
h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/digitalocean/godo v1.97.0 h1:p9w1yCcWMZcxFSLPToNGXA96WfUVLXqoHti6GzVomL4= -github.com/digitalocean/godo v1.97.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY= -github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= +github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= +github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= 
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= -github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= -github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= @@ -555,19 +200,17 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= @@ -576,12 +219,13 @@ github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9Qy github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= -github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= +github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= github.com/go-openapi/jsonreference 
v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= @@ -591,33 +235,40 @@ github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8en github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= -github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= -github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= -github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= -github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/universal-translator v0.18.1 
h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= -github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= -github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/go-playground/validator/v10 v10.11.2 h1:q3SHpufmypg+erIExEKUmsgmhDTyhcJ38oeKGACXohU= +github.com/go-playground/validator/v10 v10.11.2/go.mod h1:NieE624vt4SCTJtD87arVLvdmjPAeV8BQlHtMnw9D7s= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= @@ -654,22 +305,18 @@ github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -680,8 +327,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -697,20 +342,17 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -719,12 +361,8 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -734,8 +372,6 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -743,37 +379,16 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA= +github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 
h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gophercloud/gophercloud v1.2.0 h1:1oXyj4g54KBg/kFtCdMM6jtxSzeIyg8wv4z1HoGPp1E= -github.com/gophercloud/gophercloud v1.2.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450WpPH+yvXo= +github.com/gophercloud/gophercloud v1.5.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -783,32 +398,31 @@ github.com/gosimple/slug v1.13.1 h1:bQ+kpX9Qa6tHRaK+fZR0A0M2Kd7Pa5eHPPsb1JpHD+Q= github.com/gosimple/slug v1.13.1/go.mod h1:UiRaFH+GEilHstLUmcBgWcI42viBN7mAb818JrYOeFQ= github.com/gosimple/unidecode v1.0.1 h1:hZzFTMMqSswvf0LBJZCZgThIZrpDHFXux9KeGmn6T/o= github.com/gosimple/unidecode v1.0.1/go.mod h1:CP0Cr1Y1kogOtx0bJblKzsVWrqYaqfNOnHzpgWw4Awc= -github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2 h1:IOks+FXJ6iO/pfbaVEf4efNw+YzYBYNCkCabyrbkFTM= -github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2/go.mod h1:zj+5BNZAVmQafV583uLTAOzRr963KPdEm4d6NPmtbwg= -github.com/grafana/loki v1.6.2-0.20231017135925-990ac685e6a6 h1:V5PspEXlSlNh22sMyGkgfSOVVLTsSmhbmsp1VPt8Fdc= -github.com/grafana/loki v1.6.2-0.20231017135925-990ac685e6a6/go.mod h1:+aWr7OBDuZMT+p0rKmLfW5saO2m3YOGBnt++IlgLhVk= -github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765 h1:VXitROTlmZtLzvokNe8ZbUKpmwldM4Hy1zdNRO32jKU= -github.com/grafana/loki/pkg/push 
v0.0.0-20230127102416-571f88bc5765/go.mod h1:DhJMrd2QInI/1CNtTN43BZuTmkccdizW1jZ+F6aHkhY= +github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f h1:gyojr97YeWZ70pKNakWv5/tKwBHuLy3icnIeCo9gQr4= +github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f/go.mod h1:8dsy5tQOkeNQyjXpm5mQsbCu3H5uzeBD35MzRQFznKU= +github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 h1:/of8Z8taCPftShATouOrBVy6GaTTjgQd/VfNiZp/VXQ= +github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/loki v1.6.2-0.20231215164305-b51b7d7b5503 h1:gdrsYbmk8822v6qvPwZO5DC6QjnAW7uKJ9YXnoUmV8c= +github.com/grafana/loki v1.6.2-0.20231215164305-b51b7d7b5503/go.mod h1:d8seWXCEXkL42mhuIJYcGi6DxfehzoIpLrMQWJojvOo= +github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608 h1:ZYk42718kSXOiIKdjZKljWLgBpzL5z1yutKABksQCMg= +github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608/go.mod h1:f3JSoxBTPXX5ec4FxxeC19nTBSxoTz+cBgS3cYLMcr0= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= -github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo= -github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= -github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0= -github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= -github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= +github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= +github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g= +github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= +github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg= +github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.5.0 
h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -820,8 +434,8 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= -github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= +github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -832,8 +446,8 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= @@ -842,21 +456,21 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b h1:EkuSTU8c/63q4LMayj8ilgg/4I5PXDFVcnqKfs9qcwI= -github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b/go.mod h1:bKUb1ytds5KwUioHdvdq9jmrDqCThv95si0Ub7iNeBg= +github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e h1:sr4lujmn9heD030xx/Pd4B/JSmvRhFzuotNXaaV0WLs= +github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= 
-github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs= -github.com/hetznercloud/hcloud-go v1.41.0/go.mod h1:NaHg47L6C77mngZhwBG652dTAztYrsZ2/iITJKhQkHA= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g= +github.com/hetznercloud/hcloud-go/v2 v2.0.0/go.mod h1:4iUG2NG8b61IAwNx6UsMWQ6IfIf/i1RsG0BbsKAyR5Q= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/ionos-cloud/sdk-go/v6 v6.1.4 h1:BJHhFA8Q1SZC7VOXqKKr2BV2ysQ2/4hlk1e4hZte7GY= -github.com/ionos-cloud/sdk-go/v6 v6.1.4/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/ionos-cloud/sdk-go/v6 v6.1.8 h1:493wE/BkZxJf7x79UCE0cYGPZoqQcPiEBALvt7uVGY0= +github.com/ionos-cloud/sdk-go/v6 v6.1.8/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -885,32 +499,29 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY= -github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod 
h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= -github.com/linode/linodego v1.14.1 h1:uGxQyy0BidoEpLGdvfi4cPgEW+0YUFsEGrLEhcTfjNc= -github.com/linode/linodego v1.14.1/go.mod h1:NJlzvlNtdMRRkXb0oN6UWzUkj6t+IBsyveHgZ5Ppjyk= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/linode/linodego v1.19.0 h1:n4WJrcr9+30e9JGZ6DI0nZbm5SdAj1kSwvvt/998YUw= +github.com/linode/linodego v1.19.0/go.mod h1:XZFR+yJ9mm2kwf6itZ6SCpu+6w3KnIevV0Uu5HNWJgQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -926,24 +537,24 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.51 h1:0+Xg7vObnhrz/4ZCZcZh7zPXlmU0aveS2HDBd0m0qSo= -github.com/miekg/dns v1.1.51/go.mod h1:2Z9d3CP1LQWihRZUf29mQ19yDThaI4DAYzte2CaQW5c= +github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= +github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= @@ -952,6 +563,9 @@ github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -966,27 +580,29 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= 
-github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg=
-github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
+github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
+github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
+github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
 github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc=
 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg=
 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
-github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
 github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w=
 github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
-github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ=
-github.com/ovh/go-ovh v1.3.0/go.mod h1:AxitLZ5HBRPyUd+Zl60Ajaag+rNTdVXWIkzfrVuTXWA=
+github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM=
+github.com/ovh/go-ovh v1.4.1/go.mod h1:6bL6pPyUT7tBfI0pqOegJgRjgjuO+mOo+MyXd1EEC0M=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
 github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -995,99 +611,94 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM
 github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
 github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=
 github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
+github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
-github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
-github.com/prometheus/alertmanager v0.25.0 h1:vbXKUR6PYRiZPRIKfmXaG+dmCKG52RtPL4Btl8hQGvg=
-github.com/prometheus/alertmanager v0.25.0/go.mod h1:MEZ3rFVHqKZsw7IcNS/m4AWZeXThmJhumpiWR4eHU/w=
+github.com/prometheus/alertmanager v0.26.0 h1:uOMJWfIwJguc3NaM3appWNbbrh6G/OjvaHMk22aBBYc=
+github.com/prometheus/alertmanager v0.26.0/go.mod h1:rVcnARltVjavgVaNnmevxK7kOn7IZavyf0KNgHkbEpU=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
+github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
-github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
 github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
-github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0=
-github.com/prometheus/exporter-toolkit v0.9.1 h1:cNkC01riqiOS+kh3zdnNwRsbe/Blh0WwK3ij5rPJ9Sw=
-github.com/prometheus/exporter-toolkit v0.9.1/go.mod h1:iFlTmFISCix0vyuyBmm0UqOUCTao9+RsAsKJP3YM9ec=
+github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 h1:oHcfzdJnM/SFppy2aUlvomk37GI33x9vgJULihE5Dt8=
+github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97/go.mod h1:LoBCZeRh+5hX+fSULNyFnagYlQG/gBsyA/deNzROkq8=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
-github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
-github.com/prometheus/prometheus v0.43.1-0.20230327151049-211ae4f1f0a2 h1:i5hmbBzR+VeL5pPl1ZncsJ1bpg3SO66bwkE1msJBsMA=
-github.com/prometheus/prometheus v0.43.1-0.20230327151049-211ae4f1f0a2/go.mod h1:Mm42Acga98xgA+u5yTaC3ki3i0rJEJWFpbdHN7q2trk=
+github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk=
+github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
+github.com/prometheus/prometheus v0.47.2-0.20231010075449-4b9c19fe5510 h1:6ksZ7t1hNOzGPPs8DK7SvXQf6UfWzi+W5Z7PCBl8gx4=
+github.com/prometheus/prometheus v0.47.2-0.20231010075449-4b9c19fe5510/go.mod h1:UC0TwJiF90m2T3iYPQBKnGu8gv3s55dF/EgpTq8gyvo=
 github.com/pyroscope-io/client v0.6.0 h1:rcUFgcnfmuyVYDYT+4d0zfqc8YedOyruHSsUb9ImaBw=
 github.com/pyroscope-io/client v0.6.0/go.mod h1:4h21iOU4pUOq0prKyDlvYRL+SCKsBc5wKiEtV+rJGqU=
 github.com/pyroscope-io/godeltaprof v0.1.2 h1:MdlEmYELd5w+lvIzmZvXGNMVzW2Qc9jDMuJaPOR75g4=
 github.com/pyroscope-io/godeltaprof v0.1.2/go.mod h1:psMITXp90+8pFenXkKIpNhrfmI9saQnPbba27VIaiQE=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
-github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
 github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
 github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w=
 github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/sercand/kuberesolver/v4 v4.0.0 h1:frL7laPDG/lFm5n98ODmWnn+cvPpzlkf3LhzuPhcHP4=
-github.com/sercand/kuberesolver/v4 v4.0.0/go.mod h1:F4RGyuRmMAjeXHKL+w4P7AwUnPceEAPAhxUgXZjKgvM=
+github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY=
+github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ=
+github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg=
+github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -1103,15 +714,12 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
-github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
 github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
 github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
 github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
 github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
 github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
@@ -1121,18 +729,13 @@ github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0
 github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
 github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
 github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
-github.com/weaveworks/common v0.0.0-20230411130259-f7d83a041205 h1:gjb7t9LCnRu14LHubyLIgrE+EYlAaREiPn/VknV7R3s=
-github.com/weaveworks/common v0.0.0-20230411130259-f7d83a041205/go.mod h1:O9wmSPNVSuqxzUZPFlHnPQ8xnyvx0qBnKGFfGbj95uY=
-github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M=
-github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA=
 github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
 github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
 github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
+github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
 github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
 github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
 github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1140,6 +743,8 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE=
+github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
 go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY=
 go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA=
 go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg=
@@ -1149,56 +754,52 @@ go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/
 go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
 go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
 go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
-go.mongodb.org/mongo-driver v1.11.2 h1:+1v2rDQUWNcGW7/7E0Jvdz51V38XXxJfhzbV17aNHCw=
-go.mongodb.org/mongo-driver v1.11.2/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8=
+go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE=
+go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 h1:lE9EJyw3/JhrjWH/hEy9FptnalDQgj7vpbgC2KCCCxE=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI=
-go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM=
-go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
-go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs=
-go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s=
-go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
-go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
-go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.opentelemetry.io/collector/pdata v1.0.0-rcv0015 h1:8PzrQFk3oKiT1Sd5EmNEcagdMyt1KcBy5/OyF5He5gY=
+go.opentelemetry.io/collector/pdata v1.0.0-rcv0015/go.mod h1:I1PqyHJlsXjANC73tp43nDId7/jiv82NoZZ6uS0xdwM=
+go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw=
+go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48=
+go.opentelemetry.io/otel v1.18.0 h1:TgVozPGZ01nHyDZxK5WGPFB9QexeTMXEH7+tIClWfzs=
+go.opentelemetry.io/otel v1.18.0/go.mod h1:9lWqYO0Db579XzVuCKFNPDl4s73Voa+zEck3wHaAYQI=
+go.opentelemetry.io/otel/metric v1.18.0 h1:JwVzw94UYmbx3ej++CwLUQZxEODDj/pOuTCvzhtRrSQ=
+go.opentelemetry.io/otel/metric v1.18.0/go.mod h1:nNSpsVDjWGfb7chbRLUNW+PBNdcSTHD4Uu5pfFMOI0k=
+go.opentelemetry.io/otel/trace v1.18.0 h1:NY+czwbHbmndxojTEKiSMHkG2ClNH2PwmcHrdo0JY10=
+go.opentelemetry.io/otel/trace v1.18.0/go.mod h1:T2+SGJGuYZY3bjj5rgh/hN7KIrlpWC5nS8Mjvzckz+0=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
-go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
 go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
 go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
-go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.uber.org/ratelimit v0.2.0 h1:UQE2Bgi7p2B85uP5dC2bbRtig0C+OeNRnNEafLjsLPA=
 go.uber.org/ratelimit v0.2.0/go.mod h1:YYBV4e4naJvhpitQrWJu1vCpgB7CboMe0qhltKt6mUg=
 go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
 go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
+go4.org/netipx v0.0.0-20230125063823-8449b0a6169f h1:ketMxHg+vWm3yccyYiq+uK8D3fRmna2Fcj+awpQp84s=
+go4.org/netipx v0.0.0-20230125063823-8449b0a6169f/go.mod h1:tgPU4N2u9RByaTN3NC2p9xOzyFpte4jYwsIIRF7XlSc=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
 golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
 golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1214,8 +815,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s=
-golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b h1:r+vk0EmXNmekl0S0BascoeeoHk/L7wmaW2QF90K+kYI=
+golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
 golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -1229,8 +830,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
 golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
 golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
 golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
 golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@@ -1239,14 +838,10 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
-golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1278,37 +873,18 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
 golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
-golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
-golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
-golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
 golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
 golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1316,29 +892,9 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
-golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
-golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
-golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
-golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
-golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
+golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1351,11 +907,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1396,66 +950,34 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
 golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
 golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
 golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1463,20 +985,17 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
 golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
 golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1502,7 +1021,6 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1529,31 +1047,15 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
 golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
-golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
-golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8=
+golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
 gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
 gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
 gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
@@ -1574,41 +1076,6 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
 google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
 google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
 google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
-google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
-google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
-google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
-google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
-google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
-google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
-google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
-google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
-google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
-google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
-google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
-google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
-google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
-google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
-google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
-google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
-google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
-google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
-google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
-google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
-google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
-google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
-google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
-google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08=
-google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
-google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
-google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1641,102 +1108,18 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG
 google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
 google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
-google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
-google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
-google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
-google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
-google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
-google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
-google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
-google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
-google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
-google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
-google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
-google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
-google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
-google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
-google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
-google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
-google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
-google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
-google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=
-google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
-google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
-google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
-google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
-google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
-google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
-google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
-google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
-google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
-google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
-google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
-google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
-google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE=
-google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
-google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M=
-google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk=
-google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA=
+google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
 google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1751,35 +1134,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
 google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
 google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
 google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.38.0/go.mod
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= -google.golang.org/grpc v1.57.2 h1:uw37EN34aMFFXB2QPW7Tq6tdTbind1GpRxw5aOX3a5k= -google.golang.org/grpc v1.57.2/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1792,9 +1148,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -1809,9 +1162,10 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 
h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1822,7 +1176,6 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1832,18 +1185,18 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= -k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= -k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= -k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= -k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= -k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU= -k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= -k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= -k8s.io/utils v0.0.0-20230308161112-d77c459e9343/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/api v0.28.1 h1:i+0O8k2NPBCPYaMB+uCkseEbawEt/eFaiRqUx8aB108= +k8s.io/api v0.28.1/go.mod h1:uBYwID+66wiL28Kn2tBjBYQdEU0Xk0z5qF8bIBqk/Dg= +k8s.io/apimachinery v0.28.1 h1:EJD40og3GizBSV3mkIoXQBsws32okPOy+MkRyzh6nPY= +k8s.io/apimachinery v0.28.1/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw= +k8s.io/client-go v0.28.1 h1:pRhMzB8HyLfVwpngWKE8hDcXRqifh1ga2Z/PU9SXVK8= +k8s.io/client-go v0.28.1/go.mod h1:pEZA3FqOsVkCc07pFVzK076R+P/eXqsgx5zuuRWukNE= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= +k8s.io/kube-openapi 
v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -1852,7 +1205,7 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md new file mode 100644 index 00000000..8206a57c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -0,0 +1,611 @@ +# Release History + +## 1.7.0 (2023-07-12) + +### Features Added +* Added method `WithClientName()` to type `azcore.Client` to support shallow cloning of a client with a new name used for tracing. + +### Breaking Changes +> These changes affect only code written against beta versions v1.7.0-beta.1 or v1.7.0-beta.2 +* The beta features for CAE, tracing, and fakes have been omitted for this release. + +## 1.7.0-beta.2 (2023-06-06) + +### Breaking Changes +> These changes affect only code written against beta version v1.7.0-beta.1 +* Method `SpanFromContext()` on type `tracing.Tracer` had the `bool` return value removed. + * This includes the field `SpanFromContext` in supporting type `tracing.TracerOptions`. +* Method `AddError()` has been removed from type `tracing.Span`. +* Method `Span.End()` now requires an argument of type `*tracing.SpanEndOptions`. + +## 1.6.1 (2023-06-06) + +### Bugs Fixed +* Fixed an issue in `azcore.NewClient()` and `arm.NewClient()` that could cause an incorrect module name to be used in telemetry. + +### Other Changes +* This version contains all bug fixes from `v1.7.0-beta.1` + +## 1.7.0-beta.1 (2023-05-24) + +### Features Added +* Restored CAE support for ARM clients. +* Added supporting features to enable distributed tracing. + * Added func `runtime.StartSpan()` for use by SDKs to start spans. + * Added method `WithContext()` to `runtime.Request` to support shallow cloning with a new context. + * Added field `TracingNamespace` to `runtime.PipelineOptions`. + * Added field `Tracer` to `runtime.NewPollerOptions` and `runtime.NewPollerFromResumeTokenOptions` types. + * Added field `SpanFromContext` to `tracing.TracerOptions`. + * Added methods `Enabled()`, `SetAttributes()`, and `SpanFromContext()` to `tracing.Tracer`. 
+ * Added supporting pipeline policies to include HTTP spans when creating clients. +* Added package `fake` to support generated fakes packages in SDKs. + * The package contains public surface area exposed by fake servers and supporting APIs intended only for use by the fake server implementations. + * Added an internal fake poller implementation. + +### Bugs Fixed +* Retry policy always clones the underlying `*http.Request` before invoking the next policy. +* Added some non-standard error codes to the list of error codes for unregistered resource providers. + +## 1.6.0 (2023-05-04) + +### Features Added +* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. +* Added `TenantID` field to `policy.TokenRequestOptions`. + +## 1.5.0 (2023-04-06) + +### Features Added +* Added `ShouldRetry` to `policy.RetryOptions` for finer-grained control over when to retry. + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.5.0-beta.1 +> These features will return in v1.6.0-beta.1. +* Removed `TokenRequestOptions.Claims` and `.TenantID` +* Removed ARM client support for CAE and cross-tenant auth. + +### Bugs Fixed +* Added non-conformant LRO terminal states `Cancelled` and `Completed`. + +### Other Changes +* Updated to latest `internal` module. + +## 1.5.0-beta.1 (2023-03-02) + +### Features Added +* This release includes the features added in v1.4.0-beta.1 + +## 1.4.0 (2023-03-02) +> This release doesn't include features added in v1.4.0-beta.1. They will return in v1.5.0-beta.1. + +### Features Added +* Add `Clone()` method for `arm/policy.ClientOptions`. + +### Bugs Fixed +* ARM's RP registration policy will no longer swallow unrecognized errors. +* Fixed an issue in `runtime.NewPollerFromResumeToken()` when resuming a `Poller` with a custom `PollingHandler`. +* Fixed wrong policy copy in `arm/runtime.NewPipeline()`. + +## 1.4.0-beta.1 (2023-02-02) + +### Features Added +* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. +* Added `Claims` and `TenantID` fields to `policy.TokenRequestOptions`. +* ARM bearer token policy handles CAE challenges. + +## 1.3.1 (2023-02-02) + +### Other Changes +* Update dependencies to latest versions. + +## 1.3.0 (2023-01-06) + +### Features Added +* Added `BearerTokenOptions.AuthorizationHandler` to enable extending `runtime.BearerTokenPolicy` + with custom authorization logic +* Added `Client` types and matching constructors to the `azcore` and `arm` packages. These represent a basic client for HTTP and ARM respectively. + +### Other Changes +* Updated `internal` module to latest version. +* `policy/Request.SetBody()` allows replacing a request's body with an empty one + +## 1.2.0 (2022-11-04) + +### Features Added +* Added `ClientOptions.APIVersion` field, which overrides the default version a client + requests of the service, if the client supports this (all ARM clients do). +* Added package `tracing` that contains the building blocks for distributed tracing. +* Added field `TracingProvider` to type `policy.ClientOptions` that will be used to set the per-client tracing implementation. + +### Bugs Fixed +* Fixed an issue in `runtime.SetMultipartFormData` to properly handle slices of `io.ReadSeekCloser`. +* Fixed the MaxRetryDelay default to be 60s. +* Failure to poll the state of an LRO will now return an `*azcore.ResponseError` for poller types that require this behavior. 
+* Fixed a bug in `runtime.NewPipeline` that would cause pipeline-specified allowed headers and query parameters to be lost. + +### Other Changes +* Retain contents of read-only fields when sending requests. + +## 1.1.4 (2022-10-06) + +### Bugs Fixed +* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`. +* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string + +### Other Changes +* Removed logging URL from retry policy as it's redundant. +* Retry policy logs when it exits due to a non-retriable status code. + +## 1.1.3 (2022-09-01) + +### Bugs Fixed +* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines. + +## 1.1.2 (2022-08-09) + +### Other Changes +* Fixed various doc bugs. + +## 1.1.1 (2022-06-30) + +### Bugs Fixed +* Avoid polling when a RELO LRO synchronously terminates. + +## 1.1.0 (2022-06-03) + +### Other Changes +* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests. + +## 1.0.0 (2022-05-12) + +### Features Added +* Added interface `runtime.PollingHandler` to support custom poller implementations. + * Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`. + +### Breaking Changes +* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost` +* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic` +* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions` +* Removed `TokenRequestOptions.TenantID` +* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration` +* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()` +* Removed `arm/runtime.FinalStateVia` and related `const` values +* Renamed `runtime.PageProcessor` to `runtime.PagingHandler` +* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported. +* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()` +* `TokenCredential.GetToken` now returns `AccessToken` by value. + +### Bugs Fixed +* When per-try timeouts are enabled, only cancel the context after the body has been read and closed. +* The `Operation-Location` poller now properly handles `final-state-via` values. +* Improvements in `runtime.Poller[T]` + * `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state. + * `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries. + +### Other Changes +* Updated to latest `internal` module and absorbed breaking changes. + * Use `temporal.Resource` and deleted copy. +* The internal poller implementation has been refactored. + * The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification. + * The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface. + * The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it. +* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions` +* Default User-Agent headers no longer include `azcore` version information + +## 0.23.1 (2022-04-14) + +### Bugs Fixed +* Include XML header when marshalling XML content. +* Handle XML namespaces when searching for error code. 
+* Handle `odata.error` when searching for error code. + +## 0.23.0 (2022-04-04) + +### Features Added +* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations. +* Added `cloud` package with a new API for cloud configuration +* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type. + +### Breaking Changes +* Removed the `Poller` type-alias to the internal poller implementation. +* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations. +* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter. +* Replaced `arm.Endpoint` with `cloud` API + * Removed the `endpoint` parameter from `NewRPRegistrationPolicy()` + * `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error` +* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages. + * Removed the `pollerID` parameter as it's no longer required. + * Created optional parameter structs and moved optional parameters into them. +* Changed `FinalStateVia` field to a `const` type. + +### Other Changes +* Converted expiring resource and dependent types to use generics. + +## 0.22.0 (2022-03-03) + +### Features Added +* Added header `WWW-Authenticate` to the default allow-list of headers for logging. +* Added a pipeline policy that enables the retrieval of HTTP responses from API calls. + * Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default). + +### Breaking Changes +* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package. + +## 0.21.1 (2022-02-04) + +### Bugs Fixed +* Restore response body after reading in `Poller.FinalResponse()`. (#16911) +* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969) + +### Other Changes +* `BearerTokenPolicy` is more resilient to transient authentication failures. (#16789) + +## 0.21.0 (2022-01-11) + +### Features Added +* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger. +* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received. + +### Breaking Changes +* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions` +* Renamed `arm/ClientOptions.Host` to `.Endpoint` +* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload` +* Removed `azcore.HTTPResponse` interface type +* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter +* `runtime.NewResponseError()` no longer requires an `error` parameter + +## 0.20.0 (2021-10-22) + +### Breaking Changes +* Removed `arm.Connection` +* Removed `azcore.Credential` and `.NewAnonymousCredential()` + * `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential` +* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication +* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions` +* Contents in the `log` package have been slightly renamed. 
+* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions` +* Changed parameters for `NewBearerTokenPolicy()` +* Moved policy config options out of `arm/runtime` and into `arm/policy` + +### Features Added +* Updating Documentation +* Added string typedef `arm.Endpoint` to provide a hint toward expected ARM client endpoints +* `azcore.ClientOptions` contains common pipeline configuration settings +* Added support for multi-tenant authorization in `arm/runtime` +* Require one second minimum when calling `PollUntilDone()` + +### Bug Fixes +* Fixed a potential panic when creating the default Transporter. +* Close LRO initial response body when creating a poller. +* Fixed a panic when recursively cloning structs that contain time.Time. + +## 0.19.0 (2021-08-25) + +### Breaking Changes +* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors). + * `azcore` has all core functionality. + * `log` contains facilities for configuring in-box logging. + * `policy` is used for configuring pipeline options and creating custom pipeline policies. + * `runtime` contains various helpers used by SDK authors and generated content. + * `streaming` has helpers for streaming IO operations. +* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed. + * As a result, the `Request.Telemetry()` method has been removed. +* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it. +* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it. +* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively. + +### Bug Fixes +* Fixed an issue in the retry policy where the request body could be overwritten after a rewind. + +### Other Changes +* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively. + * The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy. +* Poller logic has been consolidated across ARM and core implementations. + * This required some changes to the internal interfaces for core pollers. +* The core poller types have been improved, including more logging and test coverage. + +## 0.18.1 (2021-08-20) + +### Features Added +* Adds an `ETag` type for comparing etags and handling etags on requests +* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object + +### Bugs Fixed +* `JoinPaths` will preserve query parameters encoded in the `root` url. + +### Other Changes +* Bumps dependency on `internal` module to the latest version (v0.7.0) + +## 0.18.0 (2021-07-29) +### Features Added +* Replaces methods from Logger type with two package methods for interacting with the logging functionality.
+* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications` +* `azcore.SetListener` replaces `azcore.Logger().SetListener` + +### Breaking Changes +* Removes `Logger` type from `azcore` + + +## 0.17.0 (2021-07-27) +### Features Added +* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879) +* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123) + +### Breaking Changes +* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104) +* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103) +* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038) + + +## 0.16.2 (2021-05-26) +### Features Added +* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715) + + +## 0.16.1 (2021-05-19) +### Features Added +* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682) + + +## 0.16.0 (2021-05-07) +### Features Added +* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642) + + +## 0.15.1 (2021-05-06) +### Features Added +* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634) + + +## 0.15.0 (2021-05-05) +### Features Added +* Add support for null map and slice +* Export `Response.Payload` method + +### Breaking Changes +* remove `Response.UnmarshalError` as it's no longer required + + +## 0.14.5 (2021-04-23) +### Features Added +* Add `UnmarshalError()` on `azcore.Response` + + +## 0.14.4 (2021-04-22) +### Features Added +* Support for basic LRO polling +* Added type `LROPoller` and supporting types for basic polling on long running operations. +* rename poller param and added doc comment + +### Bugs Fixed +* Fixed content type detection bug in logging. + + +## 0.14.3 (2021-03-29) +### Features Added +* Add support for multi-part form data +* Added method `WriteMultipartFormData()` to Request. + + +## 0.14.2 (2021-03-17) +### Features Added +* Add support for encoding JSON null values +* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null. +* Documentation fixes + +### Bugs Fixed +* Fixed improper error wrapping + + +## 0.14.1 (2021-02-08) +### Features Added +* Add `Pager` and `Poller` interfaces to azcore + + +## 0.14.0 (2021-01-12) +### Features Added +* Accept zero-value options for default values +* Specify zero-value options structs to accept default values. +* Remove `DefaultXxxOptions()` methods. +* Do not silently change TryTimeout on negative values +* make per-try timeout opt-in + + +## 0.13.4 (2020-11-20) +### Features Added +* Include telemetry string in User Agent + + +## 0.13.3 (2020-11-20) +### Features Added +* Updating response body handling on `azcore.Response` + + +## 0.13.2 (2020-11-13) +### Features Added +* Remove implementation of stateless policies as first-class functions. + + +## 0.13.1 (2020-11-05) +### Features Added +* Add `Telemetry()` method to `azcore.Request()` + + +## 0.13.0 (2020-10-14) +### Features Added +* Rename `log` to `logger` to avoid name collision with the log package. 
+* Documentation improvements +* Simplified `DefaultHTTPClientTransport()` implementation + + +## 0.12.1 (2020-10-13) +### Features Added +* Update `internal` module dependency to `v0.5.0` + + +## 0.12.0 (2020-10-08) +### Features Added +* Removed storage-specific content +* Removed internal content to prevent API clutter +* Refactored various policy options to conform with our options pattern + + +## 0.11.0 (2020-09-22) +### Features Added + +* Removed `LogError` and `LogSlowResponse`. +* Renamed `options` in `RequestLogOptions`. +* Updated `NewRequestLogPolicy()` to follow standard pattern for options. +* Refactored `requestLogPolicy.Do()` per above changes. +* Cleaned up/added logging in retry policy. +* Export `NewResponseError()` +* Fix `RequestLogOptions` comment + + +## 0.10.1 (2020-09-17) +### Features Added +* Add default console logger +* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'. +* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks. +* Add `LogLongRunningOperation` + + +## 0.10.0 (2020-09-10) +### Features Added +* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library. +* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation; it also requires a context parameter. +* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`. +* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request. +* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this. +* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type. +* moved path concatenation into `JoinPaths()` func + + +## 0.9.6 (2020-08-18) +### Features Added +* Improvements to body download policy +* Always download the response body for error responses, i.e. HTTP status codes >= 400. +* Simplify variable declarations + + +## 0.9.5 (2020-08-11) +### Features Added +* Set the Content-Length header in `Request.SetBody` + + +## 0.9.4 (2020-08-03) +### Features Added +* Fix cancellation of per try timeout +* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinate amount of time. +* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion. +* Do not drain response body if there are no more retries +* Do not retry non-idempotent operations when body download fails + + +## 0.9.3 (2020-07-28) +### Features Added +* Add support for custom HTTP request headers +* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request. +* Use `azcore.WithHTTPHeader` to add HTTP headers to a context. +* Remove method specific to Go 1.14 + + +## 0.9.2 (2020-07-28) +### Features Added +* Omit read-only content from request payloads +* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation. +* Verify no fields were dropped +* Handle embedded struct types +* Added test for cloning by value +* Add messages to failures + + +## 0.9.1 (2020-07-22) +### Features Added +* Updated dependency on internal module to fix race condition.
+ + +## 0.9.0 (2020-07-09) +### Features Added +* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure. +* Updated `sdk/internal` dependency to latest version. +* Rename package alias + + +## 0.8.2 (2020-06-29) +### Features Added +* Added missing documentation comments + +### Bugs Fixed +* Fixed a bug in body download policy. + + +## 0.8.1 (2020-06-26) +### Features Added +* Miscellaneous clean-up reported by linters + + +## 0.8.0 (2020-06-01) +### Features Added +* Differentiate between standard and URL encoding. + + +## 0.7.1 (2020-05-27) +### Features Added +* Add support for base64 encoding and decoding of payloads. + + +## 0.7.0 (2020-05-12) +### Features Added +* Change `RetryAfter()` to a function. + + +## 0.6.0 (2020-04-29) +### Features Added +* Updating `RetryAfter` to only return the duration in the RetryAfter header + + +## 0.5.0 (2020-03-23) +### Features Added +* Export `TransportFunc` + +### Breaking Changes +* Removed `IterationDone` + + +## 0.4.1 (2020-02-25) +### Features Added +* Ensure per-try timeout is properly cancelled +* Explicitly cancel the per-try timeout when the response body has been read/closed by the body download policy. +* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed. +* `Logger.Should()` will return false if no listener is set. + + +## 0.4.0 (2020-02-18) +### Features Added +* Enable custom `RetryOptions` to be specified per API call +* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call. +* Remove 429 from the list of default HTTP status codes for retry. +* Change StatusCodesForRetry to a slice so consumers can append to it. +* Added support for retry-after in HTTP-date format. +* Cleaned up some comments specific to storage. +* Remove `Request.SetQueryParam()` +* Renamed `MaxTries` to `MaxRetries` + +## 0.3.0 (2020-01-16) +### Features Added +* Added `DefaultRetryOptions` to create initialized default options. + +### Breaking Changes +* Removed `Response.CheckStatusCode()` + + +## 0.2.0 (2020-01-15) +### Features Added +* Add support for marshalling and unmarshalling JSON +* Removed `Response.Payload` field +* Exit early when unmarshalling if there is no payload + + +## 0.1.0 (2020-01-10) +### Features Added +* Initial release diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt new file mode 100644 index 00000000..48ea6616 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md new file mode 100644 index 00000000..35a74e18 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md @@ -0,0 +1,39 @@ +# Azure Core Client Module for Go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azcore)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azcore%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main) +[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main) + +The `azcore` module provides a set of common interfaces and types for Go SDK client modules. +These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html). + +## Getting started + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency. +To add the latest version to your `go.mod` file, execute the following command. + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azcore +``` + +General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore). + +## Contributing +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml new file mode 100644 index 00000000..aab92185 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -0,0 +1,29 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. 
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go new file mode 100644 index 00000000..9d077a3e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package cloud + +var ( + // AzureChina contains configuration for Azure China. + AzureChina = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzureGovernment contains configuration for Azure Government. + AzureGovernment = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzurePublic contains configuration for Azure Public Cloud. + AzurePublic = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{}, + } +) + +// ServiceName identifies a cloud service. +type ServiceName string + +// ResourceManager is a global constant identifying Azure Resource Manager. +const ResourceManager ServiceName = "resourceManager" + +// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager. +type ServiceConfiguration struct { + // Audience is the audience the client will request for its access tokens. + Audience string + // Endpoint is the service's base URL. + Endpoint string +} + +// Configuration configures a cloud. +type Configuration struct { + // ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory. + ActiveDirectoryAuthorityHost string + // Services contains configuration for the cloud's services. + Services map[ServiceName]ServiceConfiguration +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go new file mode 100644 index 00000000..985b1bde --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go @@ -0,0 +1,53 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/* +Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds. + +Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as +"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other +Azure Clouds to configure clients appropriately. + +This package contains predefined configuration for well-known sovereign clouds such as Azure Government and +Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. 
For +example, configuring a credential and ARM client for Azure Government: + + opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment} + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) + +Applications deployed to a private cloud such as Azure Stack create a Configuration object with +appropriate values: + + c := cloud.Configuration{ + ActiveDirectoryAuthorityHost: "https://...", + Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ + cloud.ResourceManager: { + Audience: "...", + Endpoint: "https://...", + }, + }, + } + opts := azcore.ClientOptions{Cloud: c} + + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) +*/ +package cloud diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go new file mode 100644 index 00000000..9fae2a9d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -0,0 +1,132 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// AccessToken represents an Azure service bearer access token with expiry information. +type AccessToken = exported.AccessToken + +// TokenCredential represents a credential capable of providing an OAuth token. +type TokenCredential = exported.TokenCredential + +// holds sentinel values used to send nulls +var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} + +// NullValue is used to send an explicit 'null' within a request. +// This is typically used in JSON-MERGE-PATCH operations to delete a value. +func NullValue[T any]() T { + t := shared.TypeOfT[T]() + v, found := nullables[t] + if !found { + var o reflect.Value + if k := t.Kind(); k == reflect.Map { + o = reflect.MakeMap(t) + } else if k == reflect.Slice { + // empty slices appear to all point to the same data block + // which causes comparisons to become ambiguous. so we create + // a slice with len/cap of one which ensures a unique address. + o = reflect.MakeSlice(t, 1, 1) + } else { + o = reflect.New(t.Elem()) + } + v = o.Interface() + nullables[t] = v + } + // return the sentinel object + return v.(T) +} + +// IsNullValue returns true if the field contains a null sentinel value. +// This is used by custom marshallers to properly encode a null value. +func IsNullValue[T any](v T) bool { + // see if our map has a sentinel object for this *T + t := reflect.TypeOf(v) + if o, found := nullables[t]; found { + o1 := reflect.ValueOf(o) + v1 := reflect.ValueOf(v) + // we found it; return true if v points to the sentinel object. + // NOTE: maps and slices can only be compared to nil, else you get + // a runtime panic. so we compare addresses instead. 
+ return o1.Pointer() == v1.Pointer() + } + // no sentinel object for this *t + return false +} + +// ClientOptions contains configuration settings for a client's pipeline. +type ClientOptions = policy.ClientOptions + +// Client is a basic HTTP client. It consists of a pipeline and tracing provider. +type Client struct { + pl runtime.Pipeline + tr tracing.Tracer + + // cached on the client to support shallow copying with new values + tp tracing.Provider + modVer string +} + +// NewClient creates a new Client instance with the provided values. +// - clientName - the fully qualified name of the client ("module/package.Client"); this is used by the telemetry policy and tracing provider. +// if module and package are the same value, the "module/" prefix can be omitted. +// - moduleVersion - the semantic version of the containing module; used by the telemetry policy +// - plOpts - pipeline configuration options; can be the zero-value +// - options - optional client configurations; pass nil to accept the default values +func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { + mod, client, err := shared.ExtractModuleName(clientName) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + + if !options.Telemetry.Disabled { + if err := shared.ValidateModVer(moduleVersion); err != nil { + return nil, err + } + } + + pl := runtime.NewPipeline(mod, moduleVersion, plOpts, options) + + tr := options.TracingProvider.NewTracer(client, moduleVersion) + + return &Client{ + pl: pl, + tr: tr, + tp: options.TracingProvider, + modVer: moduleVersion, + }, nil +} + +// Pipeline returns the pipeline for this client. +func (c *Client) Pipeline() runtime.Pipeline { + return c.pl +} + +// Tracer returns the tracer for this client. +func (c *Client) Tracer() tracing.Tracer { + return c.tr +} + +// WithClientName returns a shallow copy of the Client with its tracing client name changed to clientName. +// Note that the values for module name and version will be preserved from the source Client. +// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans +func (c *Client) WithClientName(clientName string) *Client { + tr := c.tp.NewTracer(clientName, c.modVer) + return &Client{pl: c.pl, tr: tr, tp: c.tp, modVer: c.modVer} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go new file mode 100644 index 00000000..28c64678 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -0,0 +1,257 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +/* +Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients. + +The middleware consists of three components. + + - One or more Policy instances. + - A Transporter instance. + - A Pipeline instance that combines the Policy and Transporter instances. + +# Implementing the Policy Interface + +A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as +a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share +the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to +avoid race conditions. 
+ +A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can +perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, +and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request +work, it must call the Next() method on the *policy.Request instance in order to pass the request to the +next Policy in the chain. + +When an HTTP response comes back, the Policy then gets a chance to process the response/error. The Policy instance +can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response +body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response +and error instances to its caller. + +Template for implementing a stateless Policy: + + type policyFunc func(*policy.Request) (*http.Response, error) + + // Do implements the Policy interface on policyFunc. + func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { + return pf(req) + } + + func NewMyStatelessPolicy() policy.Policy { + return policyFunc(func(req *policy.Request) (*http.Response, error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err := req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + }) + } + +Template for implementing a stateful Policy: + + type MyStatefulPolicy struct { + // TODO: add configuration/setting fields here + } + + // TODO: add initialization args to NewMyStatefulPolicy() + func NewMyStatefulPolicy() policy.Policy { + return &MyStatefulPolicy{ + // TODO: initialize configuration/setting fields here + } + } + + func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err = req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + } + +# Implementing the Transporter Interface + +The Transporter interface is responsible for sending the HTTP request and returning the corresponding +HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter +implementation uses a shared http.Client from the standard library. + +The same stateful/stateless rules for Policy implementations apply to Transporter implementations. + +# Using Policy and Transporter Instances Via a Pipeline + +To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function. + + func NewPipeline(transport Transporter, policies ...Policy) Pipeline + +The specified Policy instances form a chain and are invoked in the order provided to NewPipeline +followed by the Transporter. + +Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method. + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + + func (p Pipeline) Do(req *Request) (*http.Response, error) + +The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter +instances. The response/error is then sent through the same chain of Policy instances in reverse +order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with +TransportA.
+
+	pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC)
+
+The flow of Request and Response looks like the following:
+
+	policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+
+	                                                                   |
+	                                                             HTTP(S) endpoint
+	                                                                   |
+	caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+
+
+# Creating a Request Instance
+
+The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also
+contains some internal state and provides various convenience methods. You create a Request instance
+by calling the runtime.NewRequest function:
+
+	func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
+
+If the Request should contain a body, call the SetBody method.
+
+	func (req *Request) SetBody(body ReadSeekCloser, contentType string) error
+
+A seekable stream is required so that upon retry, the retry Policy instance can seek the stream
+back to the beginning before retrying the network request and re-uploading the body.
+
+# Sending an Explicit Null
+
+Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted.
+
+	{
+		"delete-me": null
+	}
+
+This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as
+a means to resolve the ambiguity between a field to be excluded and its zero-value.
+
+	type Widget struct {
+		Name  *string `json:",omitempty"`
+		Count *int    `json:",omitempty"`
+	}
+
+In the above example, Name and Count are defined as pointer-to-type to disambiguate between
+a missing value (nil) and a zero-value (0) which might have semantic differences.
+
+In a PATCH operation, any fields left as nil are to have their values preserved. When updating
+a Widget's count, one simply specifies the new value for Count, leaving Name nil.
+
+To fulfill the requirement for sending a JSON null, the NullValue() function can be used.
+
+	w := Widget{
+		Count: azcore.NullValue[*int](),
+	}
+
+This sends an explicit "null" for Count, indicating that any current value for Count should be deleted.
+
+# Processing the Response
+
+When the HTTP response is received, the *http.Response is returned directly. Each Policy instance
+can inspect/mutate the *http.Response.
+
+# Built-in Logging
+
+To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program.
+
+By default the logger writes to stderr. This can be customized by calling log.SetListener, providing
+a callback that writes to the desired location. Any custom logging implementation MUST provide its
+own synchronization to handle concurrent invocations.
+
+See the docs for the log package for further details.
+
+# Pageable Operations
+
+Pageable operations return potentially large data sets spread over multiple GET requests. The result of
+each GET is a "page" of data consisting of a slice of items.
+
+Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T].
+
+	func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse]
+
+The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages
+and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked.
+ + pager := widgetClient.NewListWidgetsPager(nil) + for pager.More() { + page, err := pager.NextPage(context.TODO()) + // handle err + for _, widget := range page.Values { + // process widget + } + } + +# Long-Running Operations + +Long-running operations (LROs) are operations consisting of an initial request to start the operation followed +by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one +of the following values. + + - Succeeded - the LRO completed successfully + - Failed - the LRO failed to complete + - Canceled - the LRO was canceled + +LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T]. + + func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) + +When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started. +It does _not_ mean that the widget has been created or updated (or failed to be created/updated). + +The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete, +call the PollUntilDone() method. + + poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) + // handle err + result, err := poller.PollUntilDone(context.TODO(), nil) + // handle err + // use result + +The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the +context is canceled/timed out. + +Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to +this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation +mechanism as required. + +# Resume Tokens + +Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to +recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method. + + token, err := poller.ResumeToken() + // handle error + +Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls +to poller.Poll() might change the poller's state. In this case, a new token should be created. + +After the token has been obtained, it can be used to recreate an instance of the originating poller. + + poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ + ResumeToken: token, + }) + +When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken. + +Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO +BeginA() will result in an error. +*/ +package azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go new file mode 100644 index 00000000..17bd50c6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go @@ -0,0 +1,14 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. 
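+// A typical check looks like the following (a minimal sketch; err is assumed to come
+// from a client or pipeline call):
+//
+//	var respErr *azcore.ResponseError
+//	if errors.As(err, &respErr) {
+//		// inspect respErr.StatusCode and respErr.ErrorCode
+//	}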
+type ResponseError = exported.ResponseError
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go
new file mode 100644
index 00000000..23ea7e7c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go
@@ -0,0 +1,48 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azcore
+
+import (
+	"strings"
+)
+
+// ETag is a property used for optimistic concurrency during updates
+// ETag is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2
+// An ETag can be empty ("").
+type ETag string
+
+// ETagAny is an ETag that represents everything, the value is "*"
+const ETagAny ETag = "*"
+
+// Equals does a strong comparison of two ETags. Equals returns true when both
+// ETags are not weak and the values of the underlying strings are equal.
+func (e ETag) Equals(other ETag) bool {
+	return !e.IsWeak() && !other.IsWeak() && e == other
+}
+
+// WeakEquals does a weak comparison of two ETags. Two ETags are equivalent if their opaque-tags match
+// character-by-character, regardless of either or both being tagged as "weak".
+func (e ETag) WeakEquals(other ETag) bool {
+	getStart := func(e1 ETag) int {
+		if e1.IsWeak() {
+			return 2
+		}
+		return 0
+	}
+	aStart := getStart(e)
+	bStart := getStart(other)
+
+	aVal := e[aStart:]
+	bVal := other[bStart:]
+
+	return aVal == bVal
+}
+
+// IsWeak specifies whether the ETag is strong or weak.
+func (e ETag) IsWeak() bool {
+	return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"")
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
new file mode 100644
index 00000000..a1236b36
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
@@ -0,0 +1,67 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package exported
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"time"
+)
+
+type nopCloser struct {
+	io.ReadSeeker
+}
+
+func (n nopCloser) Close() error {
+	return nil
+}
+
+// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
+// Exported as streaming.NopCloser().
+func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
+	return nopCloser{rs}
+}
+
+// HasStatusCode returns true if the Response's status code is one of the specified values.
+// Exported as runtime.HasStatusCode().
+func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
+	if resp == nil {
+		return false
+	}
+	for _, sc := range statusCodes {
+		if resp.StatusCode == sc {
+			return true
+		}
+	}
+	return false
+}
+
+// AccessToken represents an Azure service bearer access token with expiry information.
+// Exported as azcore.AccessToken.
+type AccessToken struct {
+	Token     string
+	ExpiresOn time.Time
+}
+
+// TokenRequestOptions contains specific parameters that may be used by credential types when attempting to get a token.
+// Exported as policy.TokenRequestOptions.
+type TokenRequestOptions struct {
+	// Scopes contains the list of permission scopes required for the token.
+	Scopes []string
+
+	// TenantID identifies the tenant from which to request the token. azidentity credentials authenticate in
+	// their configured default tenants when this field isn't set.
+	TenantID string
+}
+
+// TokenCredential represents a credential capable of providing an OAuth token.
+// Exported as azcore.TokenCredential.
+type TokenCredential interface {
+	// GetToken requests an access token for the specified set of scopes.
+	GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go
new file mode 100644
index 00000000..c44efd6e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go
@@ -0,0 +1,97 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package exported
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+
+	"golang.org/x/net/http/httpguts"
+)
+
+// Policy represents an extensibility point for the Pipeline that can mutate the specified
+// Request and react to the received Response.
+// Exported as policy.Policy.
+type Policy interface {
+	// Do applies the policy to the specified Request. When implementing a Policy, mutate the
+	// request before calling req.Next() to move on to the next policy, and respond to the result
+	// before returning to the caller.
+	Do(req *Request) (*http.Response, error)
+}
+
+// Pipeline represents a primitive for sending HTTP requests and receiving responses.
+// Its behavior can be extended by specifying policies during construction.
+// Exported as runtime.Pipeline.
+type Pipeline struct {
+	policies []Policy
+}
+
+// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses.
+// Exported as policy.Transporter.
+type Transporter interface {
+	// Do sends the HTTP request and returns the HTTP response or error.
+	Do(req *http.Request) (*http.Response, error)
+}
+
+// used to adapt a Transporter to a Policy
+type transportPolicy struct {
+	trans Transporter
+}
+
+func (tp transportPolicy) Do(req *Request) (*http.Response, error) {
+	if tp.trans == nil {
+		return nil, errors.New("missing transporter")
+	}
+	resp, err := tp.trans.Do(req.Raw())
+	if err != nil {
+		return nil, err
+	} else if resp == nil {
+		// there was no response and no error (rare but can happen)
+		// this ensures the retry policy will retry the request
+		return nil, errors.New("received nil response")
+	}
+	return resp, nil
+}
+
+// NewPipeline creates a new Pipeline object from the specified Policies.
+// Not directly exported, but used as part of runtime.NewPipeline().
+func NewPipeline(transport Transporter, policies ...Policy) Pipeline {
+	// transport policy must always be the last in the slice
+	policies = append(policies, transportPolicy{trans: transport})
+	return Pipeline{
+		policies: policies,
+	}
+}
+
+// Do is called for each and every HTTP request. It passes the request through all
+// the Policy objects (which can transform the Request's URL/query parameters/headers)
+// and ultimately sends the transformed HTTP request over the network.
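+// A minimal usage sketch (the transport and policies are assumed to exist):
+//
+//	pl := NewPipeline(transport, policyA, policyB)
+//	resp, err := pl.Do(req)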
+func (p Pipeline) Do(req *Request) (*http.Response, error) { + if req == nil { + return nil, errors.New("request cannot be nil") + } + // check copied from Transport.roundTrip() + for k, vv := range req.Raw().Header { + if !httpguts.ValidHeaderFieldName(k) { + if req.Raw().Body != nil { + req.Raw().Body.Close() + } + return nil, fmt.Errorf("invalid header field name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + if req.Raw().Body != nil { + req.Raw().Body.Close() + } + return nil, fmt.Errorf("invalid header field value %q for key %v", v, k) + } + } + } + req.policies = p.policies + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go new file mode 100644 index 00000000..fa99d1b7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -0,0 +1,182 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use NewRequest() instead. +// Exported as policy.Request. +type Request struct { + req *http.Request + body io.ReadSeekCloser + policies []Policy + values opValues +} + +type opValues map[reflect.Type]interface{} + +// Set adds/changes a value +func (ov opValues) set(value interface{}) { + ov[reflect.TypeOf(value)] = value +} + +// Get looks for a value set by SetValue first +func (ov opValues) get(value interface{}) bool { + v, ok := ov[reflect.ValueOf(value).Elem().Type()] + if ok { + reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v)) + } + return ok +} + +// NewRequest creates a new Request with the specified input. +// Exported as runtime.NewRequest(). +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) { + req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil) + if err != nil { + return nil, err + } + if req.URL.Host == "" { + return nil, errors.New("no Host in request URL") + } + if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") { + return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme) + } + return &Request{req: req}, nil +} + +// Body returns the original body specified when the Request was created. +func (req *Request) Body() io.ReadSeekCloser { + return req.body +} + +// Raw returns the underlying HTTP request. +func (req *Request) Raw() *http.Request { + return req.req +} + +// Next calls the next policy in the pipeline. +// If there are no more policies, nil and an error are returned. +// This method is intended to be called from pipeline policies. +// To send a request through a pipeline call Pipeline.Do(). +func (req *Request) Next() (*http.Response, error) { + if len(req.policies) == 0 { + return nil, errors.New("no more policies") + } + nextPolicy := req.policies[0] + nextReq := *req + nextReq.policies = nextReq.policies[1:] + return nextPolicy.Do(&nextReq) +} + +// SetOperationValue adds/changes a mutable key/value associated with a single operation. 
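+// Values are keyed by their concrete type, so at most one value per type is stored.
+// A minimal sketch (opState is an illustrative type, not part of this package):
+//
+//	req.SetOperationValue(opState{retries: 3})
+//	var st opState
+//	found := req.OperationValue(&st) // true once a value of that type has been set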
+func (req *Request) SetOperationValue(value interface{}) {
+	if req.values == nil {
+		req.values = opValues{}
+	}
+	req.values.set(value)
+}
+
+// OperationValue looks for a value set by SetOperationValue().
+func (req *Request) OperationValue(value interface{}) bool {
+	if req.values == nil {
+		return false
+	}
+	return req.values.get(value)
+}
+
+// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length
+// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "",
+// Content-Type won't be set.
+// Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser.
+func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error {
+	var err error
+	var size int64
+	if body != nil {
+		size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size
+		if err != nil {
+			return err
+		}
+	}
+	if size == 0 {
+		// treat an empty stream the same as a nil one: assign req a nil body
+		body = nil
+		// RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content
+		// (Del is a no-op when the header has no value)
+		req.req.Header.Del(shared.HeaderContentLength)
+	} else {
+		_, err = body.Seek(0, io.SeekStart)
+		if err != nil {
+			return err
+		}
+		req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10))
+		req.Raw().GetBody = func() (io.ReadCloser, error) {
+			_, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream
+			return body, err
+		}
+	}
+	// keep a copy of the body argument. this is to handle cases
+	// where req.Body is replaced, e.g. httputil.DumpRequest and friends.
+	req.body = body
+	req.req.Body = body
+	req.req.ContentLength = size
+	if contentType == "" {
+		// Del is a no-op when the header has no value
+		req.req.Header.Del(shared.HeaderContentType)
+	} else {
+		req.req.Header.Set(shared.HeaderContentType, contentType)
+	}
+	return nil
+}
+
+// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
+func (req *Request) RewindBody() error {
+	if req.body != nil {
+		// Reset the stream back to the beginning and restore the body
+		_, err := req.body.Seek(0, io.SeekStart)
+		req.req.Body = req.body
+		return err
+	}
+	return nil
+}
+
+// Close closes the request body.
+func (req *Request) Close() error {
+	if req.body == nil {
+		return nil
+	}
+	return req.body.Close()
+}
+
+// Clone returns a deep copy of the request with its context changed to ctx.
+func (req *Request) Clone(ctx context.Context) *Request {
+	r2 := *req
+	r2.req = req.req.Clone(ctx)
+	return &r2
+}
+
+// not exported but dependent on Request
+
+// PolicyFunc is a type that implements the Policy interface.
+// Use this type when implementing a stateless policy as a first-class function.
+type PolicyFunc func(*Request) (*http.Response, error)
+
+// Do implements the Policy interface on PolicyFunc.
+func (pf PolicyFunc) Do(req *Request) (*http.Response, error) {
+	return pf(req)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
new file mode 100644
index 00000000..7df2f88c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
@@ -0,0 +1,144 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package exported
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"regexp"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
+)
+
+// NewResponseError creates a new *ResponseError from the provided HTTP response.
+// Exported as runtime.NewResponseError().
+func NewResponseError(resp *http.Response) error {
+	respErr := &ResponseError{
+		StatusCode:  resp.StatusCode,
+		RawResponse: resp,
+	}
+
+	// prefer the error code in the response header
+	if ec := resp.Header.Get("x-ms-error-code"); ec != "" {
+		respErr.ErrorCode = ec
+		return respErr
+	}
+
+	// if we didn't get x-ms-error-code, check in the response body
+	body, err := exported.Payload(resp, nil)
+	if err != nil {
+		return err
+	}
+
+	if len(body) > 0 {
+		if code := extractErrorCodeJSON(body); code != "" {
+			respErr.ErrorCode = code
+		} else if code := extractErrorCodeXML(body); code != "" {
+			respErr.ErrorCode = code
+		}
+	}
+
+	return respErr
+}
+
+func extractErrorCodeJSON(body []byte) string {
+	var rawObj map[string]interface{}
+	if err := json.Unmarshal(body, &rawObj); err != nil {
+		// not a JSON object
+		return ""
+	}
+
+	// check if this is a wrapped error, i.e. { "error": { ... } }
+	// if so then unwrap it
+	if wrapped, ok := rawObj["error"]; ok {
+		unwrapped, ok := wrapped.(map[string]interface{})
+		if !ok {
+			return ""
+		}
+		rawObj = unwrapped
+	} else if wrapped, ok := rawObj["odata.error"]; ok {
+		// check if this is a wrapped odata error, i.e. { "odata.error": { ... } }
+		unwrapped, ok := wrapped.(map[string]any)
+		if !ok {
+			return ""
+		}
+		rawObj = unwrapped
+	}
+
+	// now check for the error code
+	code, ok := rawObj["code"]
+	if !ok {
+		return ""
+	}
+	codeStr, ok := code.(string)
+	if !ok {
+		return ""
+	}
+	return codeStr
+}
+
+func extractErrorCodeXML(body []byte) string {
+	// regular expression is much easier than dealing with the XML parser
+	rx := regexp.MustCompile(`<(?:\w+:)?[c|C]ode>\s*(\w+)\s*<\/(?:\w+:)?[c|C]ode>`)
+	res := rx.FindStringSubmatch(string(body))
+	if len(res) != 2 {
+		return ""
+	}
+	// first submatch is the entire thing, second one is the captured error code
+	return res[1]
+}
+
+// ResponseError is returned when a request is made to a service and
+// the service returns a non-success HTTP status code.
+// Use errors.As() to access this type in the error chain.
+// Exported as azcore.ResponseError.
+type ResponseError struct {
+	// ErrorCode is the error code returned by the resource provider if available.
+	ErrorCode string
+
+	// StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants.
+	StatusCode int
+
+	// RawResponse is the underlying HTTP response.
+	RawResponse *http.Response
+}
+
+// Error implements the error interface for type ResponseError.
+// Note that the message contents are not contractual and can change over time.
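+// The message includes the request method and URL, the response status, the error code
+// when available, and the response body (pretty-printed when it's valid JSON).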
+func (e *ResponseError) Error() string { + // write the request method and URL with response status code + msg := &bytes.Buffer{} + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + if e.ErrorCode != "" { + fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode) + } else { + fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := exported.Payload(e.RawResponse, nil) + if err != nil { + // this really shouldn't fail at this point as the response + // body is already cached (it was read in NewResponseError) + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + // the standard library doesn't have a pretty-printer for XML + fmt.Fprintln(msg) + } else { + fmt.Fprintln(msg, "Response contained no body") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + + return msg.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go new file mode 100644 index 00000000..0684cb31 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// This is an internal helper package to combine the complete logging APIs. +package log + +import ( + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type Event = log.Event + +const ( + EventRequest = azlog.EventRequest + EventResponse = azlog.EventResponse + EventRetryPolicy = azlog.EventRetryPolicy + EventLRO = azlog.EventLRO +) + +func Write(cls log.Event, msg string) { + log.Write(cls, msg) +} + +func Writef(cls log.Event, format string, a ...interface{}) { + log.Writef(cls, format, a...) +} + +func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +func Should(cls log.Event) bool { + return log.Should(cls) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go new file mode 100644 index 00000000..b05bd8b3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -0,0 +1,159 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package async + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md + +// Applicable returns true if the LRO is using Azure-AsyncOperation. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderAzureAsync) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["asyncURL"] + return ok +} + +// Poller is an LRO poller that uses the Azure-AsyncOperation pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The URL from Azure-AsyncOperation header. + AsyncURL string `json:"asyncURL"` + + // The URL from Location header. + LocURL string `json:"locURL"` + + // The URL from the initial LRO request. + OrigURL string `json:"origURL"` + + // The HTTP method from the initial LRO request. + Method string `json:"method"` + + // The value of final-state-via from swagger, can be the empty string. + FinalState pollers.FinalStateVia `json:"finalState"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response and final-state type. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.") + asyncURL := resp.Header.Get(shared.HeaderAzureAsync) + if asyncURL == "" { + return nil, errors.New("response is missing Azure-AsyncOperation header") + } + if !poller.IsValidURL(asyncURL) { + return nil, fmt.Errorf("invalid polling URL %s", asyncURL) + } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. + state, _ := poller.GetProvisioningState(resp) + if state == "" { + state = poller.StatusInProgress + } + p := &Poller[T]{ + pl: pl, + resp: resp, + AsyncURL: asyncURL, + LocURL: resp.Header.Get(shared.HeaderLocation), + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: state, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +// Poll retrieves the current state of the LRO. 
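+// It issues a GET to the Azure-AsyncOperation URL and updates CurState from the status
+// returned in the response body; a response without a status is treated as an error.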
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
+	err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) {
+		if !poller.StatusCodeValid(resp) {
+			p.resp = resp
+			return "", exported.NewResponseError(resp)
+		}
+		state, err := poller.GetStatus(resp)
+		if err != nil {
+			return "", err
+		} else if state == "" {
+			return "", errors.New("the response did not contain a status")
+		}
+		p.resp = resp
+		p.CurState = state
+		return p.CurState, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return p.resp, nil
+}
+
+func (p *Poller[T]) Result(ctx context.Context, out *T) error {
+	if p.resp.StatusCode == http.StatusNoContent {
+		return nil
+	} else if poller.Failed(p.CurState) {
+		return exported.NewResponseError(p.resp)
+	}
+	var req *exported.Request
+	var err error
+	if p.Method == http.MethodPatch || p.Method == http.MethodPut {
+		// for PATCH and PUT, the final GET is on the original resource URL
+		req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
+	} else if p.Method == http.MethodPost {
+		if p.FinalState == pollers.FinalStateViaAzureAsyncOp {
+			// no final GET required
+		} else if p.FinalState == pollers.FinalStateViaOriginalURI {
+			req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
+		} else if p.LocURL != "" {
+			// ideally FinalState would be set to "location" but it isn't always.
+			// must check last due to more permissive condition.
+			req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
+		}
+	}
+	if err != nil {
+		return err
+	}
+
+	// if a final GET request has been created, execute it
+	if req != nil {
+		resp, err := p.pl.Do(req)
+		if err != nil {
+			return err
+		}
+		p.resp = resp
+	}
+
+	return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
new file mode 100644
index 00000000..2bb9e105
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
@@ -0,0 +1,135 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package body
+
+import (
+	"context"
+	"errors"
+	"net/http"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
+)
+
+// Kind is the identifier of this type in a resume token.
+const kind = "body"
+
+// Applicable returns true if the LRO is using no headers, just provisioning state.
+// This is only applicable to PATCH and PUT methods and assumes no polling headers.
+func Applicable(resp *http.Response) bool {
+	// we can't check for absence of headers due to some misbehaving services
+	// like redis that return a Location header but don't actually use that protocol
+	return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut
+}
+
+// CanResume returns true if the token can rehydrate this poller type.
+func CanResume(token map[string]interface{}) bool {
+	t, ok := token["type"]
+	if !ok {
+		return false
+	}
+	tt, ok := t.(string)
+	if !ok {
+		return false
+	}
+	return tt == kind
+}
+
+// Poller is an LRO poller that uses the Body pattern.
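+// Unlike the header-based pollers, it derives the operation's state from the
+// provisioning state returned in the response body.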
+type Poller[T any] struct {
+	pl exported.Pipeline
+
+	resp *http.Response
+
+	// The poller's type, used for resume token processing.
+	Type string `json:"type"`
+
+	// The URL for polling.
+	PollURL string `json:"pollURL"`
+
+	// The LRO's current state.
+	CurState string `json:"state"`
+}
+
+// New creates a new Poller from the provided initial response.
+// Pass nil for response to create an empty Poller for rehydration.
+func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
+	if resp == nil {
+		log.Write(log.EventLRO, "Resuming Body poller.")
+		return &Poller[T]{pl: pl}, nil
+	}
+	log.Write(log.EventLRO, "Using Body poller.")
+	p := &Poller[T]{
+		pl:      pl,
+		resp:    resp,
+		Type:    kind,
+		PollURL: resp.Request.URL.String(),
+	}
+	// default initial state to InProgress. depending on the HTTP
+	// status code and provisioning state, we might change the value.
+	curState := poller.StatusInProgress
+	provState, err := poller.GetProvisioningState(resp)
+	if err != nil && !errors.Is(err, poller.ErrNoBody) {
+		return nil, err
+	}
+	if resp.StatusCode == http.StatusCreated && provState != "" {
+		// absence of provisioning state is ok for a 201, means the operation is in progress
+		curState = provState
+	} else if resp.StatusCode == http.StatusOK {
+		if provState != "" {
+			curState = provState
+		} else if provState == "" {
+			// for a 200, absence of provisioning state indicates success
+			curState = poller.StatusSucceeded
+		}
+	} else if resp.StatusCode == http.StatusNoContent {
+		curState = poller.StatusSucceeded
+	}
+	p.CurState = curState
+	return p, nil
+}
+
+func (p *Poller[T]) Done() bool {
+	return poller.IsTerminalState(p.CurState)
+}
+
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
+	err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) {
+		if !poller.StatusCodeValid(resp) {
+			p.resp = resp
+			return "", exported.NewResponseError(resp)
+		}
+		if resp.StatusCode == http.StatusNoContent {
+			p.resp = resp
+			p.CurState = poller.StatusSucceeded
+			return p.CurState, nil
+		}
+		state, err := poller.GetProvisioningState(resp)
+		if errors.Is(err, poller.ErrNoBody) {
+			// a missing response body in non-204 case is an error
+			return "", err
+		} else if state == "" {
+			// a response body without provisioning state is considered terminal success
+			state = poller.StatusSucceeded
+		} else if err != nil {
+			return "", err
+		}
+		p.resp = resp
+		p.CurState = state
+		return p.CurState, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return p.resp, nil
+}
+
+func (p *Poller[T]) Result(ctx context.Context, out *T) error {
+	return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
new file mode 100644
index 00000000..d6be8987
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
@@ -0,0 +1,119 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+ +package loc + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Kind is the identifier of this type in a resume token. +const kind = "loc" + +// Applicable returns true if the LRO is using Location. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + Type string `json:"type"` + PollURL string `json:"pollURL"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Location poller.") + locURL := resp.Header.Get(shared.HeaderLocation) + if locURL == "" { + return nil, errors.New("response is missing Location header") + } + if !poller.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid polling URL %s", locURL) + } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. + state, _ := poller.GetProvisioningState(resp) + if state == "" { + state = poller.StatusInProgress + } + return &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: locURL, + CurState: state, + }, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + // location polling can return an updated polling URL + if h := resp.Header.Get(shared.HeaderLocation); h != "" { + p.PollURL = h + } + // if provisioning state is available, use that. this is only + // for some ARM LRO scenarios (e.g. DELETE with a Location header) + // so if it's missing then use HTTP status code. 
+ provState, _ := poller.GetProvisioningState(resp) + p.resp = resp + if provState != "" { + p.CurState = provState + } else if resp.StatusCode == http.StatusAccepted { + p.CurState = poller.StatusInProgress + } else if resp.StatusCode > 199 && resp.StatusCode < 300 { + // any 2xx other than a 202 indicates success + p.CurState = poller.StatusSucceeded + } else { + p.CurState = poller.StatusFailed + } + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go new file mode 100644 index 00000000..1bc7ad0a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -0,0 +1,145 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package op + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Applicable returns true if the LRO is using Operation-Location. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderOperationLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["oplocURL"] + return ok +} + +// Poller is an LRO poller that uses the Operation-Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + OpLocURL string `json:"oplocURL"` + LocURL string `json:"locURL"` + OrigURL string `json:"origURL"` + Method string `json:"method"` + FinalState pollers.FinalStateVia `json:"finalState"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Operation-Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Operation-Location poller.") + opURL := resp.Header.Get(shared.HeaderOperationLocation) + if opURL == "" { + return nil, errors.New("response is missing Operation-Location header") + } + if !poller.IsValidURL(opURL) { + return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL) + } + locURL := resp.Header.Get(shared.HeaderLocation) + // Location header is optional + if locURL != "" && !poller.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid Location URL %s", locURL) + } + // default initial state to InProgress. if the + // service sent us a status then use that instead. 
+ curState := poller.StatusInProgress + status, err := poller.GetStatus(resp) + if err != nil && !errors.Is(err, poller.ErrNoBody) { + return nil, err + } + if status != "" { + curState = status + } + + return &Poller[T]{ + pl: pl, + resp: resp, + OpLocURL: opURL, + LocURL: locURL, + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: curState, + }, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + state, err := poller.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + var req *exported.Request + var err error + if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost { + // no final GET required, terminal response should have it + } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { + return rlErr + } else if rl != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, rl) + } else if p.Method == http.MethodPatch || p.Method == http.MethodPut { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + if err != nil { + return err + } + + // if a final GET request has been created, execute it + if req != nil { + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go new file mode 100644 index 00000000..37ed647f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go @@ -0,0 +1,24 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia string + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation" + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation FinalStateVia = "location" + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI FinalStateVia = "original-uri" + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. 
+ FinalStateViaOpLocation FinalStateVia = "operation-location" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go new file mode 100644 index 00000000..d8d86a46 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -0,0 +1,187 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + + azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// getTokenTypeName creates a type name from the type parameter T. +func getTokenTypeName[T any]() (string, error) { + tt := shared.TypeOfT[T]() + var n string + if tt.Kind() == reflect.Pointer { + n = "*" + tt = tt.Elem() + } + n += tt.Name() + if n == "" { + return "", errors.New("nameless types are not allowed") + } + return n, nil +} + +type resumeTokenWrapper[T any] struct { + Type string `json:"type"` + Token T `json:"token"` +} + +// NewResumeToken creates a resume token from the specified type. +// An error is returned if the generic type has no name (e.g. struct{}). +func NewResumeToken[TResult, TSource any](from TSource) (string, error) { + n, err := getTokenTypeName[TResult]() + if err != nil { + return "", err + } + b, err := json.Marshal(resumeTokenWrapper[TSource]{ + Type: n, + Token: from, + }) + if err != nil { + return "", err + } + return string(b), nil +} + +// ExtractToken returns the poller-specific token information from the provided token value. +func ExtractToken(token string) ([]byte, error) { + raw := map[string]json.RawMessage{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return nil, err + } + // this is dependent on the type resumeTokenWrapper[T] + tk, ok := raw["token"] + if !ok { + return nil, errors.New("missing token value") + } + return tk, nil +} + +// IsTokenValid returns an error if the specified token isn't applicable for generic type T. +func IsTokenValid[T any](token string) error { + raw := map[string]interface{}{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return err + } + t, ok := raw["type"] + if !ok { + return errors.New("missing type value") + } + tt, ok := t.(string) + if !ok { + return fmt.Errorf("invalid type format %T", t) + } + n, err := getTokenTypeName[T]() + if err != nil { + return err + } + if tt != n { + return fmt.Errorf("cannot resume from this poller token. token is for type %s, not %s", tt, n) + } + return nil +} + +// used if the operation synchronously completed +type NopPoller[T any] struct { + resp *http.Response + result T +} + +// NewNopPoller creates a NopPoller from the provided response. +// It unmarshals the response body into an instance of T. 
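+// For a 204 response or an empty body, the result remains the zero value of T.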
+func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) {
+	np := &NopPoller[T]{resp: resp}
+	if resp.StatusCode == http.StatusNoContent {
+		return np, nil
+	}
+	payload, err := exported.Payload(resp, nil)
+	if err != nil {
+		return nil, err
+	}
+	if len(payload) == 0 {
+		return np, nil
+	}
+	if err = json.Unmarshal(payload, &np.result); err != nil {
+		return nil, err
+	}
+	return np, nil
+}
+
+func (*NopPoller[T]) Done() bool {
+	return true
+}
+
+func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) {
+	return p.resp, nil
+}
+
+func (p *NopPoller[T]) Result(ctx context.Context, out *T) error {
+	*out = p.result
+	return nil
+}
+
+// PollHelper creates and executes the request, calling update() with the response.
+// If the request fails, the update func is not called.
+// The update func returns the state of the operation for logging purposes or an error
+// if it fails to extract the required state from the response.
+func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, update func(resp *http.Response) (string, error)) error {
+	req, err := azexported.NewRequest(ctx, http.MethodGet, endpoint)
+	if err != nil {
+		return err
+	}
+	resp, err := pl.Do(req)
+	if err != nil {
+		return err
+	}
+	state, err := update(resp)
+	if err != nil {
+		return err
+	}
+	log.Writef(log.EventLRO, "State %s", state)
+	return nil
+}
+
+// ResultHelper processes the response as success or failure.
+// In the success case, it unmarshals the payload into either a new instance of T or out.
+// In the failure case, it creates an *azcore.ResponseError from the response.
+func ResultHelper[T any](resp *http.Response, failed bool, out *T) error {
+	// short-circuit the simple success case with no response body to unmarshal
+	if resp.StatusCode == http.StatusNoContent {
+		return nil
+	}
+
+	defer resp.Body.Close()
+	if !poller.StatusCodeValid(resp) || failed {
+		// the LRO failed. unmarshal the error and update state
+		return azexported.NewResponseError(resp)
+	}
+
+	// success case
+	payload, err := exported.Payload(resp, nil)
+	if err != nil {
+		return err
+	}
+	if len(payload) == 0 {
+		return nil
+	}
+
+	if err = json.Unmarshal(payload, out); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
new file mode 100644
index 00000000..53c8d353
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
@@ -0,0 +1,36 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package shared
+
+const (
+	ContentTypeAppJSON = "application/json"
+	ContentTypeAppXML  = "application/xml"
+)
+
+const (
+	HeaderAuthorization          = "Authorization"
+	HeaderAuxiliaryAuthorization = "x-ms-authorization-auxiliary"
+	HeaderAzureAsync             = "Azure-AsyncOperation"
+	HeaderContentLength          = "Content-Length"
+	HeaderContentType            = "Content-Type"
+	HeaderLocation               = "Location"
+	HeaderOperationLocation      = "Operation-Location"
+	HeaderRetryAfter             = "Retry-After"
+	HeaderUserAgent              = "User-Agent"
+	HeaderWWWAuthenticate        = "WWW-Authenticate"
+	HeaderXMSClientRequestID     = "x-ms-client-request-id"
+)
+
+const BearerTokenPrefix = "Bearer "
+
+const (
+	// Module is the name of the calling module used in telemetry data.
+	Module = "azcore"
+
+	// Version is the semantic version (see http://semver.org) of this module.
+ Version = "v1.7.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go new file mode 100644 index 00000000..db0aaa7c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -0,0 +1,103 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +import ( + "context" + "fmt" + "net/http" + "reflect" + "regexp" + "strconv" + "time" +) + +// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. +type CtxWithHTTPHeaderKey struct{} + +// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. +type CtxWithRetryOptionsKey struct{} + +// CtxIncludeResponseKey is used as a context key for retrieving the raw response. +type CtxIncludeResponseKey struct{} + +// Delay waits for the duration to elapse or the context to be cancelled. +func Delay(ctx context.Context, delay time.Duration) error { + select { + case <-time.After(delay): + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// RetryAfter returns non-zero if the response contains a Retry-After header value. +func RetryAfter(resp *http.Response) time.Duration { + if resp == nil { + return 0 + } + ra := resp.Header.Get(HeaderRetryAfter) + if ra == "" { + return 0 + } + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { + return time.Duration(retryAfter) * time.Second + } else if t, err := time.Parse(time.RFC1123, ra); err == nil { + return time.Until(t) + } + return 0 +} + +// TypeOfT returns the type of the generic type param. +func TypeOfT[T any]() reflect.Type { + // you can't, at present, obtain the type of + // a type parameter, so this is the trick + return reflect.TypeOf((*T)(nil)).Elem() +} + +// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface. +type TransportFunc func(*http.Request) (*http.Response, error) + +// Do implements the Transporter interface for the TransportFunc type. +func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) { + return pf(req) +} + +// ValidateModVer verifies that moduleVersion is a valid semver 2.0 string. +func ValidateModVer(moduleVersion string) error { + modVerRegx := regexp.MustCompile(`^v\d+\.\d+\.\d+(?:-[a-zA-Z0-9_.-]+)?$`) + if !modVerRegx.MatchString(moduleVersion) { + return fmt.Errorf("malformed moduleVersion param value %s", moduleVersion) + } + return nil +} + +// ExtractModuleName returns "module", "package.Client" from "module/package.Client" or +// "package", "package.Client" from "package.Client" when there's no "module/" prefix. +// If clientName is malformed, an error is returned. +func ExtractModuleName(clientName string) (string, string, error) { + // uses unnamed capturing for "module", "package.Client", and "package" + regex, err := regexp.Compile(`^(?:([a-z0-9]+)/)?(([a-z0-9]+)\.(?:[A-Za-z0-9]+))$`) + if err != nil { + return "", "", err + } + + matches := regex.FindStringSubmatch(clientName) + if len(matches) < 4 { + return "", "", fmt.Errorf("malformed clientName %s", clientName) + } + + // the first match is the entire string, the second is "module", the third is + // "package.Client" and the fourth is "package". 
+	// if there was no "module/" prefix, the second match will be the empty string
+	if matches[1] != "" {
+		return matches[1], matches[2], nil
+	}
+	return matches[3], matches[2], nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go
new file mode 100644
index 00000000..2f3901bf
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go
@@ -0,0 +1,10 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+// Package log contains functionality for configuring logging behavior.
+// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all".
+package log
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go
new file mode 100644
index 00000000..7bde29d0
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go
@@ -0,0 +1,50 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// Package log provides functionality for configuring logging facilities.
+package log
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+// Event is used to group entries. Each group can be toggled on or off.
+type Event = log.Event
+
+const (
+	// EventRequest entries contain information about HTTP requests.
+	// This includes information like the URL, query parameters, and headers.
+	EventRequest Event = "Request"
+
+	// EventResponse entries contain information about HTTP responses.
+	// This includes information like the HTTP status code, headers, and request URL.
+	EventResponse Event = "Response"
+
+	// EventRetryPolicy entries contain information specific to the retry policy in use.
+	EventRetryPolicy Event = "Retry"
+
+	// EventLRO entries contain information specific to long-running operations.
+	// This includes information like polling location, operation state, and sleep intervals.
+	EventLRO Event = "LongRunningOperation"
+)
+
+// SetEvents is used to control which events are written to
+// the log. By default all log events are written.
+// NOTE: this is not goroutine safe and should be called before using SDK clients.
+func SetEvents(cls ...Event) {
+	log.SetEvents(cls...)
+}
+
+// SetListener will set the Logger to write to the specified Listener.
+// NOTE: this is not goroutine safe and should be called before using SDK clients.
+func SetListener(lst func(Event, string)) {
+	log.SetListener(lst)
+}
+
+// for testing purposes
+func resetEvents() {
+	log.TestResetEvents()
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go
new file mode 100644
index 00000000..fad2579e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go
@@ -0,0 +1,10 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+// Package policy contains the definitions needed for configuring in-box pipeline policies
+// and creating custom policies.
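+// Templates for implementing the Policy interface can be found in the azcore package documentation.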
+package policy diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go new file mode 100644 index 00000000..b2000478 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -0,0 +1,164 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package policy + +import ( + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +type Policy = exported.Policy + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +type Transporter = exported.Transporter + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use runtime.NewRequest() instead. +type Request = exported.Request + +// ClientOptions contains optional settings for a client's pipeline. +// All zero-value fields will be initialized with default values. +type ClientOptions struct { + // APIVersion overrides the default version requested of the service. Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion string + + // Cloud specifies a cloud for the client. The default is Azure Public Cloud. + Cloud cloud.Configuration + + // Logging configures the built-in logging policy. + Logging LogOptions + + // Retry configures the built-in retry policy. + Retry RetryOptions + + // Telemetry configures the built-in telemetry policy. + Telemetry TelemetryOptions + + // TracingProvider configures the tracing provider. + // It defaults to a no-op tracer. + TracingProvider tracing.Provider + + // Transport sets the transport for HTTP requests. + Transport Transporter + + // PerCallPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCallPolicies []Policy + + // PerRetryPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetryPolicies []Policy +} + +// LogOptions configures the logging policy's behavior. +type LogOptions struct { + // IncludeBody indicates if request and response bodies should be included in logging. + // The default value is false. + // NOTE: enabling this can lead to disclosure of sensitive information, use with care. + IncludeBody bool + + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParams is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParams []string +} + +// RetryOptions configures the retry policy's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. +type RetryOptions struct { + // MaxRetries specifies the maximum number of attempts a failed operation will be retried + // before producing an error. 
+ // The default value is three. A value less than zero means one try and no retries. + MaxRetries int32 + + // TryTimeout indicates the maximum time allowed for any single try of an HTTP request. + // This is disabled by default. Specify a value greater than zero to enable. + // NOTE: Setting this to a small value might cause premature HTTP request time-outs. + TryTimeout time.Duration + + // RetryDelay specifies the initial amount of delay to use before retrying an operation. + // The value is used only if the HTTP response does not contain a Retry-After header. + // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. + // The default value is 800 milliseconds. A value less than zero means no delay between retries. + RetryDelay time.Duration + + // MaxRetryDelay specifies the maximum delay allowed before retrying an operation. + // Typically the value is greater than or equal to the value specified in RetryDelay. + // The default value is 60 seconds. A value less than zero means there is no cap. + MaxRetryDelay time.Duration + + // StatusCodes specifies the HTTP status codes that indicate the operation should be retried. + // A nil slice will use the following values. + // http.StatusRequestTimeout 408 + // http.StatusTooManyRequests 429 + // http.StatusInternalServerError 500 + // http.StatusBadGateway 502 + // http.StatusServiceUnavailable 503 + // http.StatusGatewayTimeout 504 + // Specifying values will replace the default values. + // Specifying an empty slice will disable retries for HTTP status codes. + StatusCodes []int + + // ShouldRetry evaluates if the retry policy should retry the request. + // When specified, the function overrides comparison against the list of + // HTTP status codes and error checking within the retry policy. Context + // and NonRetriable errors remain evaluated before calling ShouldRetry. + // The *http.Response and error parameters are mutually exclusive, i.e. + // if one is nil, the other is not nil. + // A return value of true means the retry policy should retry. + ShouldRetry func(*http.Response, error) bool +} + +// TelemetryOptions configures the telemetry policy's behavior. +type TelemetryOptions struct { + // ApplicationID is an application-specific identification string to add to the User-Agent. + // It has a maximum length of 24 characters and must not contain any spaces. + ApplicationID string + + // Disabled will prevent the addition of any telemetry data to the User-Agent. + Disabled bool +} + +// TokenRequestOptions contains parameters that may be used by credential types when attempting to get a token. +type TokenRequestOptions = exported.TokenRequestOptions + +// BearerTokenOptions configures the bearer token policy's behavior. +type BearerTokenOptions struct { + // AuthorizationHandler allows SDK developers to run client-specific logic when BearerTokenPolicy must authorize a request. + // When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from + // its given credential. + AuthorizationHandler AuthorizationHandler +} + +// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request. +type AuthorizationHandler struct { + // OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token + // from the policy's given credential.
Implementations that need to perform I/O should use the Request's context, + // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't + // send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a + // token from its credential according to its configuration. + OnRequest func(*Request, func(TokenRequestOptions) error) error + + // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the + // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible + // for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's + // given credential. Implementations that need to perform I/O should use the Request's context, available from + // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil, + // the policy will return any 401 response to the client. + OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go new file mode 100644 index 00000000..c9cfa438 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package runtime contains various facilities for creating requests and handling responses. +// The content is intended for SDK authors. +package runtime diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go new file mode 100644 index 00000000..6d03b291 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go @@ -0,0 +1,19 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +// NewResponseError creates an *azcore.ResponseError from the provided HTTP response. +// Call this when a service request returns a non-successful status code. +func NewResponseError(resp *http.Response) error { + return exported.NewResponseError(resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go new file mode 100644 index 00000000..5507665d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -0,0 +1,77 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" +) + +// PagingHandler contains the required data for constructing a Pager. +type PagingHandler[T any] struct { + // More returns a boolean indicating if there are more pages to fetch. + // It uses the provided page to make the determination. + More func(T) bool + + // Fetcher fetches the first and subsequent pages. 
+ Fetcher func(context.Context, *T) (T, error) +} + +// Pager provides operations for iterating over paged responses. +type Pager[T any] struct { + current *T + handler PagingHandler[T] + firstPage bool +} + +// NewPager creates an instance of Pager using the specified PagingHandler. +// Pass a non-nil T for firstPage if the first page has already been retrieved. +func NewPager[T any](handler PagingHandler[T]) *Pager[T] { + return &Pager[T]{ + handler: handler, + firstPage: true, + } +} + +// More returns true if there are more pages to retrieve. +func (p *Pager[T]) More() bool { + if p.current != nil { + return p.handler.More(*p.current) + } + return true +} + +// NextPage advances the pager to the next page. +func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { + var resp T + var err error + if p.current != nil { + if p.firstPage { + // we get here if it's an LRO pager; we already have the first page + p.firstPage = false + return *p.current, nil + } else if !p.handler.More(*p.current) { + return *new(T), errors.New("no more pages") + } + resp, err = p.handler.Fetcher(ctx, p.current) + } else { + // non-LRO case, first page + p.firstPage = false + resp, err = p.handler.Fetcher(ctx, nil) + } + if err != nil { + return *new(T), err + } + p.current = &resp + return *p.current, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T]. +func (p *Pager[T]) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &p.current) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go new file mode 100644 index 00000000..9d9288f5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -0,0 +1,66 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// PipelineOptions contains Pipeline options for SDK developers. +type PipelineOptions struct { + AllowedHeaders, AllowedQueryParameters []string + APIVersion APIVersionOptions + PerCall, PerRetry []policy.Policy +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +type Pipeline = exported.Pipeline + +// NewPipeline creates a pipeline from connection options, with any additional policies as specified. +// Policies from ClientOptions are placed after policies from PipelineOptions. +// The module and version parameters are used by the telemetry policy, when enabled. +func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline { + cp := policy.ClientOptions{} + if options != nil { + cp = *options + } + if len(plOpts.AllowedHeaders) > 0 { + headers := make([]string, 0, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) + headers = append(headers, plOpts.AllowedHeaders...) + headers = append(headers, cp.Logging.AllowedHeaders...) + cp.Logging.AllowedHeaders = headers + } + if len(plOpts.AllowedQueryParameters) > 0 { + qp := make([]string, 0, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) + qp = append(qp, plOpts.AllowedQueryParameters...) + qp = append(qp, cp.Logging.AllowedQueryParams...)
+ cp.Logging.AllowedQueryParams = qp + } + // we put the includeResponsePolicy at the very beginning so that the raw response + // is populated with the final response (some policies might mutate the response) + policies := []policy.Policy{exported.PolicyFunc(includeResponsePolicy)} + if cp.APIVersion != "" { + policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion)) + } + if !cp.Telemetry.Disabled { + policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry)) + } + policies = append(policies, plOpts.PerCall...) + policies = append(policies, cp.PerCallPolicies...) + policies = append(policies, NewRetryPolicy(&cp.Retry)) + policies = append(policies, plOpts.PerRetry...) + policies = append(policies, cp.PerRetryPolicies...) + policies = append(policies, NewLogPolicy(&cp.Logging)) + policies = append(policies, exported.PolicyFunc(httpHeaderPolicy), exported.PolicyFunc(bodyDownloadPolicy)) + transport := cp.Transport + if transport == nil { + transport = defaultHTTPClient + } + return exported.NewPipeline(transport, policies...) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go new file mode 100644 index 00000000..e5309aa6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// APIVersionOptions contains options for API versions +type APIVersionOptions struct { + // Location indicates where to set the version on a request, for example in a header or query param + Location APIVersionLocation + // Name is the name of the header or query parameter, for example "api-version" + Name string +} + +// APIVersionLocation indicates which part of a request identifies the service version +type APIVersionLocation int + +const ( + // APIVersionLocationQueryParam indicates a query parameter + APIVersionLocationQueryParam = 0 + // APIVersionLocationHeader indicates a header + APIVersionLocationHeader = 1 +) + +// newAPIVersionPolicy constructs an APIVersionPolicy. If version is "", Do will be a no-op. If version +// isn't empty and opts.Name is empty, Do will return an error. +func newAPIVersionPolicy(version string, opts *APIVersionOptions) *apiVersionPolicy { + if opts == nil { + opts = &APIVersionOptions{} + } + return &apiVersionPolicy{location: opts.Location, name: opts.Name, version: version} +} + +// apiVersionPolicy enables users to set the API version of every request a client sends. +type apiVersionPolicy struct { + // location indicates whether "name" refers to a query parameter or header. + location APIVersionLocation + + // name of the query param or header whose value should be overridden; provided by the client. + name string + + // version is the value (provided by the user) that replaces the default version value. + version string +} + +// Do sets the request's API version, if the policy is configured to do so, replacing any prior value. 
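A sketch (editor-added) of how an SDK client constructor is expected to combine these pieces; the module name "azexample", its version, and the API version opt-in values are placeholders:

package azexample

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// NewClientPipeline builds the pipeline a hypothetical client would embed.
func NewClientPipeline(options *policy.ClientOptions) runtime.Pipeline {
	plOpts := runtime.PipelineOptions{
		// opt in to user-settable API versions via ClientOptions.APIVersion
		APIVersion: runtime.APIVersionOptions{
			Location: runtime.APIVersionLocationQueryParam,
			Name:     "api-version",
		},
		// runs in addition to the built-in policies, once per retry
		PerRetry: []policy.Policy{runtime.NewRequestIDPolicy()},
	}
	return runtime.NewPipeline("azexample", "v0.1.0", plOpts, options)
}

With this in place, a caller passing &policy.ClientOptions{APIVersion: "2023-01-01"} gets ?api-version=2023-01-01 appended to every request by the apiVersionPolicy below.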
+func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) { + if a.version != "" { + if a.name == "" { + // user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions + return nil, errors.New("this client doesn't support overriding its API version") + } + switch a.location { + case APIVersionLocationHeader: + req.Raw().Header.Set(a.name, a.version) + case APIVersionLocationQueryParam: + q := req.Raw().URL.Query() + q.Set(a.name, a.version) + req.Raw().URL.RawQuery = q.Encode() + default: + return nil, fmt.Errorf("unknown APIVersionLocation %d", a.location) + } + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go new file mode 100644 index 00000000..b61e4c12 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -0,0 +1,116 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +type BearerTokenPolicy struct { + // mainResource is the resource to be retrieved using the tenant specified in the credential + mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState] + // the following fields are read-only + authzHandler policy.AuthorizationHandler + cred exported.TokenCredential + scopes []string +} + +type acquiringResourceState struct { + req *policy.Request + p *BearerTokenPolicy + tro policy.TokenRequestOptions +} + +// acquire acquires or updates the resource; only one +// thread/goroutine at a time ever calls this function +func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken(state.req.Raw().Context(), state.tro) + if err != nil { + return exported.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} + +// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. +// cred: an azcore.TokenCredential implementation such as a credential object from azidentity +// scopes: the list of permission scopes required for the token. +// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options.
+func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy { + if opts == nil { + opts = &policy.BearerTokenOptions{} + } + return &BearerTokenPolicy{ + authzHandler: opts.AuthorizationHandler, + cred: cred, + scopes: scopes, + mainResource: temporal.NewResource(acquire), + } +} + +// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential +func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error { + return func(tro policy.TokenRequestOptions) error { + as := acquiringResourceState{p: b, req: req, tro: tro} + tk, err := b.mainResource.Get(as) + if err != nil { + return err + } + req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token) + return nil + } +} + +// Do authorizes a request with a bearer token +func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { + var err error + if b.authzHandler.OnRequest != nil { + err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) + } else { + err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes}) + } + if err != nil { + return nil, ensureNonRetriable(err) + } + + res, err := req.Next() + if err != nil { + return nil, err + } + + if res.StatusCode == http.StatusUnauthorized { + b.mainResource.Expire() + if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil { + if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { + res, err = req.Next() + } + } + } + return res, ensureNonRetriable(err) +} + +func ensureNonRetriable(err error) error { + var nre errorinfo.NonRetriable + if err != nil && !errors.As(err, &nre) { + err = btpError{err} + } + return err +} + +// btpError is a wrapper that ensures RetryPolicy doesn't retry requests BearerTokenPolicy couldn't authorize +type btpError struct { + error +} + +func (btpError) NonRetriable() {} + +var _ errorinfo.NonRetriable = (*btpError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go new file mode 100644 index 00000000..99dc029f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go @@ -0,0 +1,72 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +// bodyDownloadPolicy creates a policy object that downloads the response's body to a []byte. 
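A sketch (editor-added) of a credential feeding this policy; staticTokenCredential is a stand-in for a real azcore.TokenCredential implementation such as one from azidentity, and the scope is a placeholder:

package azexample

import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// staticTokenCredential returns a fixed token; a real credential would
// request one for opts.Scopes and honor its actual expiry.
type staticTokenCredential struct{ token string }

func (s staticTokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
	return azcore.AccessToken{Token: s.token, ExpiresOn: time.Now().Add(time.Hour)}, nil
}

func newAuthPipeline(token string) runtime.Pipeline {
	bearer := runtime.NewBearerTokenPolicy(staticTokenCredential{token: token}, []string{"https://example.com/.default"}, nil)
	return runtime.NewPipeline("azexample", "v0.1.0", runtime.PipelineOptions{PerRetry: []policy.Policy{bearer}}, nil)
}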
+func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if err != nil { + return resp, err + } + var opValues bodyDownloadPolicyOpValues + // don't skip downloading error response bodies + if req.OperationValue(&opValues); opValues.Skip && resp.StatusCode < 400 { + return resp, err + } + // we get here when Skip is false, or when Skip is true but the response + // status is an error (>= 400): in both cases download the body + _, err = Payload(resp) + if err != nil { + return resp, newBodyDownloadError(err, req) + } + return resp, err +} + +// bodyDownloadPolicyOpValues is the struct containing the per-operation values +type bodyDownloadPolicyOpValues struct { + Skip bool +} + +type bodyDownloadError struct { + err error +} + +func newBodyDownloadError(err error, req *policy.Request) error { + // on failure, only retry the request for idempotent operations. + // we currently identify them as DELETE, GET, and PUT requests. + if m := strings.ToUpper(req.Raw().Method); m == http.MethodDelete || m == http.MethodGet || m == http.MethodPut { + // error is safe for retry + return err + } + // wrap error to avoid retries + return &bodyDownloadError{ + err: err, + } +} + +func (b *bodyDownloadError) Error() string { + return fmt.Sprintf("body download policy: %s", b.err.Error()) +} + +func (b *bodyDownloadError) NonRetriable() { + // marker method +} + +func (b *bodyDownloadError) Unwrap() error { + return b.err +} + +var _ errorinfo.NonRetriable = (*bodyDownloadError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go new file mode 100644 index 00000000..770e0a2b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// httpHeaderPolicy adds custom HTTP headers from the request's context to the request +func httpHeaderPolicy(req *policy.Request) (*http.Response, error) { + // check if any custom HTTP headers have been specified + if header := req.Raw().Context().Value(shared.CtxWithHTTPHeaderKey{}); header != nil { + for k, v := range header.(http.Header) { + // use Set to replace any existing value + // it also canonicalizes the header key + req.Raw().Header.Set(k, v[0]) + // add any remaining values + for i := 1; i < len(v); i++ { + req.Raw().Header.Add(k, v[i]) + } + } + } + return req.Next() +} + +// WithHTTPHeader adds the specified http.Header to the parent context. +// Use this to specify custom HTTP headers at the API-call level. +// Any overlapping headers will have their values replaced with the values specified here.
+func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go new file mode 100644 index 00000000..4714baa3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go @@ -0,0 +1,34 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// includeResponsePolicy creates a policy that retrieves the raw HTTP response upon request +func includeResponsePolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if resp == nil { + return resp, err + } + if httpOutRaw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); httpOutRaw != nil { + httpOut := httpOutRaw.(**http.Response) + *httpOut = resp + } + return resp, err +} + +// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. +// The resp parameter will contain the HTTP response after the request has completed. +func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { + return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go new file mode 100644 index 00000000..8514f57d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go @@ -0,0 +1,263 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/diag" +) + +type logPolicy struct { + includeBody bool + allowedHeaders map[string]struct{} + allowedQP map[string]struct{} +} + +// NewLogPolicy creates a request/response logging policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. 
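A sketch (editor-added) of both context helpers at a call site; the header name and endpoint are illustrative:

package azexample

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func callWithExtras(ctx context.Context, pl runtime.Pipeline) (*http.Response, error) {
	// capture the final *http.Response, useful when a client method
	// only returns a typed result
	var raw *http.Response
	ctx = runtime.WithCaptureResponse(ctx, &raw)
	// attach a custom header to this call only
	ctx = runtime.WithHTTPHeader(ctx, http.Header{"X-Custom-Audit": []string{"demo"}})
	req, err := runtime.NewRequest(ctx, http.MethodGet, "https://example.com/items")
	if err != nil {
		return nil, err
	}
	return pl.Do(req)
}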
+func NewLogPolicy(o *policy.LogOptions) policy.Policy { + if o == nil { + o = &policy.LogOptions{} + } + // construct default hash set of allowed headers + allowedHeaders := map[string]struct{}{ + "accept": {}, + "cache-control": {}, + "connection": {}, + "content-length": {}, + "content-type": {}, + "date": {}, + "etag": {}, + "expires": {}, + "if-match": {}, + "if-modified-since": {}, + "if-none-match": {}, + "if-unmodified-since": {}, + "last-modified": {}, + "ms-cv": {}, + "pragma": {}, + "request-id": {}, + "retry-after": {}, + "server": {}, + "traceparent": {}, + "transfer-encoding": {}, + "user-agent": {}, + "www-authenticate": {}, + "x-ms-request-id": {}, + "x-ms-client-request-id": {}, + "x-ms-return-client-request-id": {}, + } + // add any caller-specified allowed headers to the set + for _, ah := range o.AllowedHeaders { + allowedHeaders[strings.ToLower(ah)] = struct{}{} + } + // now do the same thing for query params + allowedQP := getAllowedQueryParams(o.AllowedQueryParams) + return &logPolicy{ + includeBody: o.IncludeBody, + allowedHeaders: allowedHeaders, + allowedQP: allowedQP, + } +} + +// getAllowedQueryParams merges the default set of allowed query parameters +// with a custom set (usually comes from client options). +func getAllowedQueryParams(customAllowedQP []string) map[string]struct{} { + allowedQP := map[string]struct{}{ + "api-version": {}, + } + for _, qp := range customAllowedQP { + allowedQP[strings.ToLower(qp)] = struct{}{} + } + return allowedQP +} + +// logPolicyOpValues is the struct containing the per-operation values +type logPolicyOpValues struct { + try int32 + start time.Time +} + +func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) { + // Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object. + var opValues logPolicyOpValues + if req.OperationValue(&opValues); opValues.start.IsZero() { + opValues.start = time.Now() // If this is the 1st try, record this operation's start time + } + opValues.try++ // The first try is #1 (not #0) + req.SetOperationValue(opValues) + + // Log the outgoing request as informational + if log.Should(log.EventRequest) { + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try) + p.writeRequestWithResponse(b, req, nil, nil) + var err error + if p.includeBody { + err = writeReqBody(req, b) + } + log.Write(log.EventRequest, b.String()) + if err != nil { + return nil, err + } + } + + // Set the time for this particular retry operation and then Do the operation. 
+ tryStart := time.Now() + response, err := req.Next() // Make the request + tryEnd := time.Now() + tryDuration := tryEnd.Sub(tryStart) + opDuration := tryEnd.Sub(opValues.start) + + if log.Should(log.EventResponse) { + // We're going to log this; build the string to log + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v, OpTime=%v) -- ", opValues.try, tryDuration, opDuration) + if err != nil { // This HTTP request did not get a response from the service + fmt.Fprint(b, "REQUEST ERROR\n") + } else { + fmt.Fprint(b, "RESPONSE RECEIVED\n") + } + + p.writeRequestWithResponse(b, req, response, err) + if err != nil { + // skip frames runtime.Callers() and runtime.StackTrace() + b.WriteString(diag.StackTrace(2, 32)) + } else if p.includeBody { + err = writeRespBody(response, b) + } + log.Write(log.EventResponse, b.String()) + } + return response, err +} + +const redactedValue = "REDACTED" + +// getSanitizedURL returns a sanitized string for the provided url.URL +func getSanitizedURL(u url.URL, allowedQueryParams map[string]struct{}) string { + // redact applicable query params + qp := u.Query() + for k := range qp { + if _, ok := allowedQueryParams[strings.ToLower(k)]; !ok { + qp.Set(k, redactedValue) + } + } + u.RawQuery = qp.Encode() + return u.String() +} + +// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If resp and/or err are +// not nil, then these are also written into the Buffer. +func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) { + // Write the request into the buffer. + fmt.Fprint(b, " "+req.Raw().Method+" "+getSanitizedURL(*req.Raw().URL, p.allowedQP)+"\n") + p.writeHeader(b, req.Raw().Header) + if resp != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " RESPONSE Status: "+resp.Status+"\n") + p.writeHeader(b, resp.Header) + } + if err != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") + } +} + +// writeHeader appends an HTTP request's or response's header into a Buffer. +func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) { + if len(header) == 0 { + b.WriteString(" (no headers)\n") + return + } + keys := make([]string, 0, len(header)) + // Alphabetize the headers + for k := range header { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + value := header.Get(k) + // redact all header values not in the allow-list + if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok { + value = redactedValue + } + fmt.Fprintf(b, " %s: %+v\n", k, value) + } +} + +// returns true if the request/response body should be logged. +// this is determined by looking at the content-type header value.
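A sketch (editor-added) of widening these redaction allow-lists through LogOptions; the header and query parameter names are placeholders:

package azexample

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func newVerboseLogPolicy() policy.Policy {
	// anything absent from the default and custom allow-lists is logged as REDACTED
	return runtime.NewLogPolicy(&policy.LogOptions{
		IncludeBody:        true, // bodies with text/JSON/XML content types get logged too
		AllowedHeaders:     []string{"x-ms-correlation-request-id"},
		AllowedQueryParams: []string{"timeout"},
	})
}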
+func shouldLogBody(b *bytes.Buffer, contentType string) bool { + contentType = strings.ToLower(contentType) + if strings.HasPrefix(contentType, "text") || + strings.Contains(contentType, "json") || + strings.Contains(contentType, "xml") { + return true + } + fmt.Fprintf(b, " Skip logging body for %s\n", contentType) + return false +} + +// writes to a buffer, used for logging purposes +func writeReqBody(req *policy.Request, b *bytes.Buffer) error { + if req.Raw().Body == nil { + fmt.Fprint(b, " Request contained no body\n") + return nil + } + if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) { + return nil + } + body, err := io.ReadAll(req.Raw().Body) + if err != nil { + fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error()) + return err + } + if err := req.RewindBody(); err != nil { + return err + } + logBody(b, body) + return nil +} + +// writes to a buffer, used for logging purposes +func writeRespBody(resp *http.Response, b *bytes.Buffer) error { + ct := resp.Header.Get(shared.HeaderContentType) + if ct == "" { + fmt.Fprint(b, " Response contained no body\n") + return nil + } else if !shouldLogBody(b, ct) { + return nil + } + body, err := Payload(resp) + if err != nil { + fmt.Fprintf(b, " Failed to read response body: %s\n", err.Error()) + return err + } + if len(body) > 0 { + logBody(b, body) + } else { + fmt.Fprint(b, " Response contained no body\n") + } + return nil +} + +func logBody(b *bytes.Buffer, body []byte) { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprintln(b, string(body)) + fmt.Fprintln(b, " --------------------------------------------------------------------------------") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go new file mode 100644 index 00000000..360a7f21 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +type requestIDPolicy struct{} + +// NewRequestIDPolicy returns a policy that adds the x-ms-client-request-id header +func NewRequestIDPolicy() policy.Policy { + return &requestIDPolicy{} +} + +func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) { + if req.Raw().Header.Get(shared.HeaderXMSClientRequestID) == "" { + id, err := uuid.New() + if err != nil { + return nil, err + } + req.Raw().Header.Set(shared.HeaderXMSClientRequestID, id.String()) + } + + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go new file mode 100644 index 00000000..e0c5929f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -0,0 +1,262 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
+ +package runtime + +import ( + "context" + "errors" + "io" + "math" + "math/rand" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +const ( + defaultMaxRetries = 3 +) + +func setDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = defaultMaxRetries + } else if o.MaxRetries < 0 { + o.MaxRetries = 0 + } + + // SDK guidelines specify the default MaxRetryDelay is 60 seconds + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 60 * time.Second + } else if o.MaxRetryDelay < 0 { + // not really an unlimited cap, but sufficiently large to be considered as such + o.MaxRetryDelay = math.MaxInt64 + } + if o.RetryDelay == 0 { + o.RetryDelay = 800 * time.Millisecond + } else if o.RetryDelay < 0 { + o.RetryDelay = 0 + } + if o.StatusCodes == nil { + // NOTE: if you change this list, you MUST update the docs in policy/policy.go + o.StatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + } +} + +func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 + pow := func(number int64, exponent int32) int64 { // pow is a nested helper function + var result int64 = 1 + for n := int32(0); n < exponent; n++ { + result *= number + } + return result + } + + delay := time.Duration(pow(2, try)-1) * o.RetryDelay + + // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) + delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand + if delay > o.MaxRetryDelay { + delay = o.MaxRetryDelay + } + return delay +} + +// NewRetryPolicy creates a policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewRetryPolicy(o *policy.RetryOptions) policy.Policy { + if o == nil { + o = &policy.RetryOptions{} + } + p := &retryPolicy{options: *o} + return p +} + +type retryPolicy struct { + options policy.RetryOptions +} + +func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + options := p.options + // check if the retry options have been overridden for this call + if override := req.Raw().Context().Value(shared.CtxWithRetryOptionsKey{}); override != nil { + options = override.(policy.RetryOptions) + } + setDefaults(&options) + // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.3) + // When to retry: connection failure or temporary/timeout. + var rwbody *retryableRequestBody + if req.Body() != nil { + // wrap the body so we control when it's actually closed. + // do this outside the for loop so defers don't accumulate. + rwbody = &retryableRequestBody{body: req.Body()} + defer rwbody.realClose() + } + try := int32(1) + for { + resp = nil // reset + log.Writef(log.EventRetryPolicy, "=====> Try=%d", try) + + // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because + // the stream may not be at offset 0 when we first get it and we want the same behavior for the + // 1st try as for additional tries.
+ err = req.RewindBody() + if err != nil { + return + } + // RewindBody() restores Raw().Body to its original state, so set our rewindable after + if rwbody != nil { + req.Raw().Body = rwbody + } + + if options.TryTimeout == 0 { + clone := req.Clone(req.Raw().Context()) + resp, err = clone.Next() + } else { + // Set the per-try time for this particular retry operation and then Do the operation. + tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout) + clone := req.Clone(tryCtx) + resp, err = clone.Next() // Make the request + // if the body was already downloaded or there was an error it's safe to cancel the context now + if err != nil { + tryCancel() + } else if exported.PayloadDownloaded(resp) { + tryCancel() + } else { + // must cancel the context after the body has been read and closed + resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body} + } + } + if err == nil { + log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode) + } else { + log.Writef(log.EventRetryPolicy, "error %v", err) + } + + if ctxErr := req.Raw().Context().Err(); ctxErr != nil { + // don't retry if the parent context has been cancelled or its deadline exceeded + err = ctxErr + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + + // check if the error is not retriable + var nre errorinfo.NonRetriable + if errors.As(err, &nre) { + // the error says it's not retriable so don't retry + log.Writef(log.EventRetryPolicy, "non-retriable error %T", nre) + return + } + + if options.ShouldRetry != nil { + // a non-nil ShouldRetry overrides our HTTP status code check + if !options.ShouldRetry(resp, err) { + // predicate says we shouldn't retry + log.Write(log.EventRetryPolicy, "exit due to ShouldRetry") + return + } + } else if err == nil && !HasStatusCode(resp, options.StatusCodes...) { + // if there is no error and the response code isn't in the list of retry codes then we're done. + log.Write(log.EventRetryPolicy, "exit due to non-retriable status code") + return + } + + if try == options.MaxRetries+1 { + // max number of tries has been reached, don't sleep again + log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries) + return + } + + // use the delay from retry-after if available + delay := shared.RetryAfter(resp) + if delay <= 0 { + delay = calcDelay(options, try) + } else if delay > options.MaxRetryDelay { + // the retry-after delay exceeds the cap so don't retry + log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay) + return + } + + // drain before retrying so nothing is leaked + Drain(resp) + + log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay) + select { + case <-time.After(delay): + try++ + case <-req.Raw().Context().Done(): + err = req.Raw().Context().Err() + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + } +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level.
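To make the schedule above concrete, a worked example (editor-added) under the defaults (RetryDelay = 800ms, MaxRetries = 3, MaxRetryDelay = 60s, jitter drawn from [0.8, 1.3)); the numbers are approximate:

// try 1 fails -> sleep (2^1-1)*800ms*jitter, roughly 0.64s..1.04s
// try 2 fails -> sleep (2^2-1)*800ms*jitter, roughly 1.92s..3.12s
// try 3 fails -> sleep (2^3-1)*800ms*jitter, roughly 4.48s..7.28s
// try 4 fails -> try == MaxRetries+1, so the loop returns the last response/error
// a positive Retry-After header replaces the computed delay, unless it exceeds MaxRetryDelay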
+func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context { + return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) +} + +// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser) + +// This struct is used when sending a body to the network +type retryableRequestBody struct { + body io.ReadSeeker // Seeking is required to support retries +} + +// Read reads a block of data from the inner stream +func (b *retryableRequestBody) Read(p []byte) (n int, err error) { + return b.body.Read(p) +} + +func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) { + return b.body.Seek(offset, whence) +} + +func (b *retryableRequestBody) Close() error { + // We don't want the underlying transport to close the request body on transient failures so this is a nop. + // The retry policy closes the request body upon success. + return nil +} + +func (b *retryableRequestBody) realClose() error { + if c, ok := b.body.(io.Closer); ok { + return c.Close() + } + return nil +} + +// ********** The following type/methods implement the contextCancelReadCloser + +// contextCancelReadCloser combines an io.ReadCloser with a cancel func. +// it ensures the cancel func is invoked once the body has been read and closed. +type contextCancelReadCloser struct { + cf context.CancelFunc + body io.ReadCloser +} + +func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { + return rc.body.Read(p) +} + +func (rc *contextCancelReadCloser) Close() error { + err := rc.body.Close() + rc.cf() + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go new file mode 100644 index 00000000..2abcdc57 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "net/http" + "os" + "runtime" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +type telemetryPolicy struct { + telemetryValue string +} + +// NewTelemetryPolicy creates a telemetry policy object that adds telemetry information to outgoing HTTP requests. +// The format is [<application_id> ]azsdk-go-<mod>/<ver> <platform_info>. +// Pass nil to accept the default values; this is the same as passing a zero-value options.
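A sketch (editor-added) of a per-call override using the WithRetryOptions helper above; the endpoint and values are illustrative:

package azexample

import (
	"context"
	"net/http"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func quickProbe(ctx context.Context, pl runtime.Pipeline) (*http.Response, error) {
	// fail fast for this call only: one retry, 10s per attempt
	ctx = runtime.WithRetryOptions(ctx, policy.RetryOptions{
		MaxRetries: 1,
		TryTimeout: 10 * time.Second,
	})
	req, err := runtime.NewRequest(ctx, http.MethodGet, "https://example.com/health")
	if err != nil {
		return nil, err
	}
	return pl.Do(req)
}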
+func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy { + if o == nil { + o = &policy.TelemetryOptions{} + } + tp := telemetryPolicy{} + if o.Disabled { + return &tp + } + b := &bytes.Buffer{} + // normalize ApplicationID + if o.ApplicationID != "" { + o.ApplicationID = strings.ReplaceAll(o.ApplicationID, " ", "/") + if len(o.ApplicationID) > 24 { + o.ApplicationID = o.ApplicationID[:24] + } + b.WriteString(o.ApplicationID) + b.WriteRune(' ') + } + b.WriteString(formatTelemetry(mod, ver)) + b.WriteRune(' ') + b.WriteString(platformInfo) + tp.telemetryValue = b.String() + return &tp +} + +func formatTelemetry(comp, ver string) string { + return fmt.Sprintf("azsdk-go-%s/%s", comp, ver) +} + +func (p telemetryPolicy) Do(req *policy.Request) (*http.Response, error) { + if p.telemetryValue == "" { + return req.Next() + } + // preserve the existing User-Agent string + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + p.telemetryValue = fmt.Sprintf("%s %s", p.telemetryValue, ua) + } + req.Raw().Header.Set(shared.HeaderUserAgent, p.telemetryValue) + return req.Next() +} + +// NOTE: the ONLY function that should write to this variable is this func +var platformInfo = func() string { + operatingSystem := runtime.GOOS // Default OS string + switch operatingSystem { + case "windows": + operatingSystem = os.Getenv("OS") // Get more specific OS information + case "linux": // accept default OS info + case "freebsd": // accept default OS info + } + return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) +}() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go new file mode 100644 index 00000000..3d029a3d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -0,0 +1,327 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia = pollers.FinalStateVia + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation = pollers.FinalStateViaLocation + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. 
+ FinalStateViaOpLocation = pollers.FinalStateViaOpLocation +) + +// NewPollerOptions contains the optional parameters for NewPoller. +type NewPollerOptions[T any] struct { + // FinalStateVia contains the final-state-via value for the LRO. + FinalStateVia FinalStateVia + + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPoller creates a Poller based on the provided initial response. +func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + if options.Handler != nil { + return &Poller[T]{ + op: options.Handler, + resp: resp, + result: result, + }, nil + } + + defer resp.Body.Close() + // this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success). + // ideally the codegen should return an error if the initial response failed and not even create a poller. + if !poller.StatusCodeValid(resp) { + return nil, errors.New("the operation failed or was cancelled") + } + + // determine the polling method + var opr PollingHandler[T] + var err error + if async.Applicable(resp) { + // async poller must be checked first as it can also have a location header + opr, err = async.New[T](pl, resp, options.FinalStateVia) + } else if op.Applicable(resp) { + // op poller must be checked before loc as it can also have a location header + opr, err = op.New[T](pl, resp, options.FinalStateVia) + } else if loc.Applicable(resp) { + opr, err = loc.New[T](pl, resp) + } else if body.Applicable(resp) { + // must test body poller last as it's a subset of the other pollers. + // TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion) + opr, err = body.New[T](pl, resp) + } else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) { + // if we get here it means we have a 202 with no polling headers. + // for DELETE and POST this is a hard error per ARM RPC spec. + return nil, errors.New("response is missing polling URL") + } else { + opr, err = pollers.NewNopPoller[T](resp) + } + + if err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + resp: resp, + result: result, + }, nil +} + +// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken. +type NewPollerFromResumeTokenOptions[T any] struct { + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPollerFromResumeToken creates a Poller from a resume token string. 
+func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerFromResumeTokenOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + + if err := pollers.IsTokenValid[T](token); err != nil { + return nil, err + } + raw, err := pollers.ExtractToken(token) + if err != nil { + return nil, err + } + var asJSON map[string]interface{} + if err := json.Unmarshal(raw, &asJSON); err != nil { + return nil, err + } + + opr := options.Handler + // now rehydrate the poller based on the encoded poller type + if opr != nil { + log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) + } else if async.CanResume(asJSON) { + opr, _ = async.New[T](pl, nil, "") + } else if body.CanResume(asJSON) { + opr, _ = body.New[T](pl, nil) + } else if loc.CanResume(asJSON) { + opr, _ = loc.New[T](pl, nil) + } else if op.CanResume(asJSON) { + opr, _ = op.New[T](pl, nil, "") + } else { + return nil, fmt.Errorf("unhandled poller token %s", string(raw)) + } + if err := json.Unmarshal(raw, &opr); err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + result: result, + }, nil +} + +// PollingHandler[T] abstracts the differences among poller implementations. +type PollingHandler[T any] interface { + // Done returns true if the LRO has reached a terminal state. + Done() bool + + // Poll fetches the latest state of the LRO. + Poll(context.Context) (*http.Response, error) + + // Result is called once the LRO has reached a terminal state. It populates the out parameter + // with the result of the operation. + Result(ctx context.Context, out *T) error +} + +// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. +type Poller[T any] struct { + op PollingHandler[T] + resp *http.Response + err error + result *T + done bool +} + +// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method. +type PollUntilDoneOptions struct { + // Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second. + // Pass zero to accept the default value (30s). + Frequency time.Duration +} + +// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires. +// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals. +// options: pass nil to accept the default values. +// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might +// benefit from a shorter or longer duration. 
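A sketch (editor-added) of the intended lifecycle for these constructors, including the resume-token round trip; widgetResult and the endpoint are placeholders:

package azexample

import (
	"context"
	"net/http"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

type widgetResult struct {
	Status string
}

func createWidget(ctx context.Context, pl runtime.Pipeline) (widgetResult, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, "https://example.com/widgets/1")
	if err != nil {
		return widgetResult{}, err
	}
	resp, err := pl.Do(req)
	if err != nil {
		return widgetResult{}, err
	}
	poller, err := runtime.NewPoller[widgetResult](resp, pl, nil)
	if err != nil {
		return widgetResult{}, err
	}
	// the token can be handed to another process, which resumes
	// polling via NewPollerFromResumeToken
	if tk, err := poller.ResumeToken(); err == nil {
		_ = tk
	}
	// block until the LRO reaches a terminal state, honoring Retry-After hints
	return poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: 5 * time.Second})
}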
+func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) { + if options == nil { + options = &PollUntilDoneOptions{} + } + cp := *options + if cp.Frequency == 0 { + cp.Frequency = 30 * time.Second + } + + // skip the floor check when executing tests so they don't take so long + if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second { + return *new(T), errors.New("polling frequency minimum is one second") + } + + start := time.Now() + logPollUntilDoneExit := func(v interface{}) { + log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start)) + } + log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op) + if p.resp != nil { + // initial check for a retry-after header existing on the initial response + if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { + log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) + if err := shared.Delay(ctx, retryAfter); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } + } + // begin polling the endpoint until a terminal state is reached + for { + resp, err := p.Poll(ctx) + if err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + if p.Done() { + logPollUntilDoneExit("succeeded") + return p.Result(ctx) + } + d := cp.Frequency + if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { + log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String()) + d = retryAfter + } else { + log.Writef(log.EventLRO, "delay for %s", d.String()) + } + if err = shared.Delay(ctx, d); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } +} + +// Poll fetches the latest state of the LRO. It returns an HTTP response or error. +// If Poll succeeds, the poller's state is updated and the HTTP response is returned. +// If Poll fails, the poller's state is unmodified and the error is returned. +// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + if p.Done() { + // the LRO has reached a terminal state, don't poll again + return p.resp, nil + } + resp, err := p.op.Poll(ctx) + if err != nil { + return nil, err + } + p.resp = resp + return p.resp, nil +} + +// Done returns true if the LRO has reached a terminal state. +// Once a terminal state is reached, call Result(). +func (p *Poller[T]) Done() bool { + return p.op.Done() +} + +// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done. +// If the LRO completed successfully, a populated instance of T is returned. +// If the LRO failed or was canceled, an *azcore.ResponseError error is returned. +// Calling this on an LRO in a non-terminal state will return an error. +func (p *Poller[T]) Result(ctx context.Context) (T, error) { + if !p.Done() { + return *new(T), errors.New("poller is in a non-terminal state") + } + if p.done { + // the result has already been retrieved, return the cached value + if p.err != nil { + return *new(T), p.err + } + return *p.result, nil + } + err := p.op.Result(ctx, p.result) + var respErr *exported.ResponseError + if errors.As(err, &respErr) { + // the LRO failed. 
+func (p *Poller[T]) ResumeToken() (string, error) {
+    if p.Done() {
+        return "", errors.New("poller is in a terminal state")
+    }
+    tk, err := pollers.NewResumeToken[T](p.op)
+    if err != nil {
+        return "", err
+    }
+    return tk, err
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
new file mode 100644
index 00000000..98e00718
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
@@ -0,0 +1,248 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+    "bytes"
+    "context"
+    "encoding/base64"
+    "encoding/json"
+    "encoding/xml"
+    "fmt"
+    "io"
+    "mime/multipart"
+    "os"
+    "path"
+    "reflect"
+    "strings"
+    "time"
+
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+)
+
+// Base64Encoding is used to specify which base-64 encoder/decoder to use when
+// encoding/decoding a slice of bytes to/from a string.
+type Base64Encoding int
+
+const (
+    // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads.
+    Base64StdFormat Base64Encoding = 0
+
+    // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads.
+    Base64URLFormat Base64Encoding = 1
+)
+
+// NewRequest creates a new policy.Request with the specified input.
+// The endpoint MUST be properly encoded before calling this function.
+func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) {
+    return exported.NewRequest(ctx, httpMethod, endpoint)
+}
+
+// JoinPaths concatenates multiple URL path segments into one path,
+// inserting path separation characters as required. JoinPaths will preserve
+// query parameters in the root path.
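+//
+// For example (a sketch of the behavior implemented below):
+//
+//    JoinPaths("https://example.com/api?a=1", "v1", "items/")
+//    // returns "https://example.com/api/v1/items/?a=1"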
+func JoinPaths(root string, paths ...string) string {
+    if len(paths) == 0 {
+        return root
+    }
+
+    qps := ""
+    if strings.Contains(root, "?") {
+        splitPath := strings.Split(root, "?")
+        root, qps = splitPath[0], splitPath[1]
+    }
+
+    p := path.Join(paths...)
+    // path.Join will remove any trailing slashes.
+    // if one was provided, preserve it.
+    if strings.HasSuffix(paths[len(paths)-1], "/") && !strings.HasSuffix(p, "/") {
+        p += "/"
+    }
+
+    if qps != "" {
+        p = p + "?" + qps
+    }
+
+    if strings.HasSuffix(root, "/") && strings.HasPrefix(p, "/") {
+        root = root[:len(root)-1]
+    } else if !strings.HasSuffix(root, "/") && !strings.HasPrefix(p, "/") {
+        p = "/" + p
+    }
+    return root + p
+}
+
+// EncodeByteArray will base-64 encode the byte slice v.
+func EncodeByteArray(v []byte, format Base64Encoding) string {
+    if format == Base64URLFormat {
+        return base64.RawURLEncoding.EncodeToString(v)
+    }
+    return base64.StdEncoding.EncodeToString(v)
+}
+
+// MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody.
+// The encoded value is treated as a JSON string.
+func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error {
+    // send as a JSON string
+    encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format))
+    return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON)
+}
+
+// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody.
+func MarshalAsJSON(req *policy.Request, v interface{}) error {
+    if omit := os.Getenv("AZURE_SDK_GO_OMIT_READONLY"); omit == "true" {
+        v = cloneWithoutReadOnlyFields(v)
+    }
+    b, err := json.Marshal(v)
+    if err != nil {
+        return fmt.Errorf("error marshalling type %T: %s", v, err)
+    }
+    return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON)
+}
+
+// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody.
+func MarshalAsXML(req *policy.Request, v interface{}) error {
+    b, err := xml.Marshal(v)
+    if err != nil {
+        return fmt.Errorf("error marshalling type %T: %s", v, err)
+    }
+    // include the XML header as some services require it
+    b = []byte(xml.Header + string(b))
+    return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML)
+}
+
+// SetMultipartFormData writes the specified keys/values as multi-part form
+// fields with the specified value. File content must be specified as a ReadSeekCloser.
+// All other values are treated as string values.
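+//
+// A sketch of usage (file is any io.ReadSeekCloser, e.g. one produced by streaming.NopCloser):
+//
+//    err := SetMultipartFormData(req, map[string]interface{}{
+//        "metadata": "some value",
+//        "file":     file,
+//    })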
+func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error {
+    body := bytes.Buffer{}
+    writer := multipart.NewWriter(&body)
+
+    writeContent := func(fieldname, filename string, src io.Reader) error {
+        fd, err := writer.CreateFormFile(fieldname, filename)
+        if err != nil {
+            return err
+        }
+        // copy the data to the form file
+        if _, err = io.Copy(fd, src); err != nil {
+            return err
+        }
+        return nil
+    }
+
+    for k, v := range formData {
+        if rsc, ok := v.(io.ReadSeekCloser); ok {
+            if err := writeContent(k, k, rsc); err != nil {
+                return err
+            }
+            continue
+        } else if rscs, ok := v.([]io.ReadSeekCloser); ok {
+            for _, rsc := range rscs {
+                if err := writeContent(k, k, rsc); err != nil {
+                    return err
+                }
+            }
+            continue
+        }
+        // ensure the value is in string format
+        s, ok := v.(string)
+        if !ok {
+            s = fmt.Sprintf("%v", v)
+        }
+        if err := writer.WriteField(k, s); err != nil {
+            return err
+        }
+    }
+    if err := writer.Close(); err != nil {
+        return err
+    }
+    return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType())
+}
+
+// SkipBodyDownload will disable automatic downloading of the response body.
+func SkipBodyDownload(req *policy.Request) {
+    req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true})
+}
+
+// returns a clone of the object graph pointed to by v, omitting values of all read-only
+// fields. if there are no read-only fields in the object graph, no clone is created.
+func cloneWithoutReadOnlyFields(v interface{}) interface{} {
+    val := reflect.Indirect(reflect.ValueOf(v))
+    if val.Kind() != reflect.Struct {
+        // not a struct, skip
+        return v
+    }
+    // first walk the graph to find any R/O fields.
+    // if there aren't any, skip cloning the graph.
+    if !recursiveFindReadOnlyField(val) {
+        return v
+    }
+    return recursiveCloneWithoutReadOnlyFields(val)
+}
+
+// returns true if any field in the object graph of val contains the `azure:"ro"` tag value
+func recursiveFindReadOnlyField(val reflect.Value) bool {
+    t := val.Type()
+    // iterate over the fields, looking for the "azure" tag.
+    for i := 0; i < t.NumField(); i++ {
+        field := t.Field(i)
+        aztag := field.Tag.Get("azure")
+        if azureTagIsReadOnly(aztag) {
+            return true
+        } else if reflect.Indirect(val.Field(i)).Kind() == reflect.Struct && recursiveFindReadOnlyField(reflect.Indirect(val.Field(i))) {
+            return true
+        }
+    }
+    return false
+}
+
+// clones the object graph of val. all non-R/O properties are copied to the clone
+func recursiveCloneWithoutReadOnlyFields(val reflect.Value) interface{} {
+    t := val.Type()
+    clone := reflect.New(t)
+    // iterate over the fields, looking for the "azure" tag.
+    for i := 0; i < t.NumField(); i++ {
+        field := t.Field(i)
+        aztag := field.Tag.Get("azure")
+        if azureTagIsReadOnly(aztag) {
+            // omit from payload
+            continue
+        }
+        // clone field will receive the same value as the source field...
+        value := val.Field(i)
+        v := reflect.Indirect(value)
+        if v.IsValid() && v.Type() != reflect.TypeOf(time.Time{}) && v.Kind() == reflect.Struct {
+            // ...unless the source value is a struct, in which case we recurse to clone that struct.
+            // (We can't recursively clone time.Time because it contains unexported fields.)
+            c := recursiveCloneWithoutReadOnlyFields(v)
+            if field.Anonymous {
+                // NOTE: this does not handle the case of embedded fields of unexported struct types.
+                // this should be ok as we don't generate any code like this at present
+                value = reflect.Indirect(reflect.ValueOf(c))
+            } else {
+                value = reflect.ValueOf(c)
+            }
+        }
+        reflect.Indirect(clone).Field(i).Set(value)
+    }
+    return clone.Interface()
+}
+
+// returns true if the "azure" tag contains the option "ro"
+func azureTagIsReadOnly(tag string) bool {
+    if tag == "" {
+        return false
+    }
+    parts := strings.Split(tag, ",")
+    for _, part := range parts {
+        if part == "ro" {
+            return true
+        }
+    }
+    return false
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go
new file mode 100644
index 00000000..d1f58e9e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go
@@ -0,0 +1,135 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+    "bytes"
+    "encoding/base64"
+    "encoding/json"
+    "encoding/xml"
+    "fmt"
+    "io"
+    "net/http"
+
+    "github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
+)
+
+// Payload reads and returns the response body or an error.
+// On a successful read, the response body is cached.
+// Subsequent reads will access the cached value.
+func Payload(resp *http.Response) ([]byte, error) {
+    return exported.Payload(resp, nil)
+}
+
+// HasStatusCode returns true if the Response's status code is one of the specified values.
+func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
+    return exported.HasStatusCode(resp, statusCodes...)
+}
+
+// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v.
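+//
+// A sketch of usage, assuming the service returns its payload as a base-64 encoded JSON string:
+//
+//    var data []byte
+//    if err := UnmarshalAsByteArray(resp, &data, Base64StdFormat); err != nil {
+//        // handle error
+//    }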
+func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) error {
+    p, err := Payload(resp)
+    if err != nil {
+        return err
+    }
+    return DecodeByteArray(string(p), v, format)
+}
+
+// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v.
+func UnmarshalAsJSON(resp *http.Response, v interface{}) error {
+    payload, err := Payload(resp)
+    if err != nil {
+        return err
+    }
+    // TODO: verify early exit is correct
+    if len(payload) == 0 {
+        return nil
+    }
+    err = removeBOM(resp)
+    if err != nil {
+        return err
+    }
+    err = json.Unmarshal(payload, v)
+    if err != nil {
+        err = fmt.Errorf("unmarshalling type %T: %s", v, err)
+    }
+    return err
+}
+
+// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v.
+func UnmarshalAsXML(resp *http.Response, v interface{}) error {
+    payload, err := Payload(resp)
+    if err != nil {
+        return err
+    }
+    // TODO: verify early exit is correct
+    if len(payload) == 0 {
+        return nil
+    }
+    err = removeBOM(resp)
+    if err != nil {
+        return err
+    }
+    err = xml.Unmarshal(payload, v)
+    if err != nil {
+        err = fmt.Errorf("unmarshalling type %T: %s", v, err)
+    }
+    return err
+}
+
+// Drain reads the response body to completion then closes it. The bytes read are discarded.
+func Drain(resp *http.Response) {
+    if resp != nil && resp.Body != nil {
+        _, _ = io.Copy(io.Discard, resp.Body)
+        resp.Body.Close()
+    }
+}
+
+// removeBOM removes any byte-order mark prefix from the payload if present.
+func removeBOM(resp *http.Response) error {
+    _, err := exported.Payload(resp, &exported.PayloadOptions{
+        BytesModifier: func(b []byte) []byte {
+            // UTF8
+            return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+        },
+    })
+    if err != nil {
+        return err
+    }
+    return nil
+}
+
+// DecodeByteArray will base-64 decode the provided string into v.
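+//
+// For example (a sketch; surrounding JSON string quotes, if present, are stripped):
+//
+//    var v []byte
+//    _ = DecodeByteArray(`"aGVsbG8="`, &v, Base64StdFormat) // v == []byte("hello")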
+func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error {
+    if len(s) == 0 {
+        return nil
+    }
+    payload := s
+    if payload[0] == '"' {
+        // remove surrounding quotes
+        payload = payload[1 : len(payload)-1]
+    }
+    switch format {
+    case Base64StdFormat:
+        decoded, err := base64.StdEncoding.DecodeString(payload)
+        if err == nil {
+            *v = decoded
+            return nil
+        }
+        return err
+    case Base64URLFormat:
+        // use raw encoding as URL format should not contain any '=' characters
+        decoded, err := base64.RawURLEncoding.DecodeString(payload)
+        if err == nil {
+            *v = decoded
+            return nil
+        }
+        return err
+    default:
+        return fmt.Errorf("unrecognized byte array format: %d", format)
+    }
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go
new file mode 100644
index 00000000..869bed51
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go
@@ -0,0 +1,37 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+    "crypto/tls"
+    "net"
+    "net/http"
+    "time"
+)
+
+var defaultHTTPClient *http.Client
+
+func init() {
+    defaultTransport := &http.Transport{
+        Proxy: http.ProxyFromEnvironment,
+        DialContext: (&net.Dialer{
+            Timeout:   30 * time.Second,
+            KeepAlive: 30 * time.Second,
+        }).DialContext,
+        ForceAttemptHTTP2:     true,
+        MaxIdleConns:          100,
+        IdleConnTimeout:       90 * time.Second,
+        TLSHandshakeTimeout:   10 * time.Second,
+        ExpectContinueTimeout: 1 * time.Second,
+        TLSClientConfig: &tls.Config{
+            MinVersion: tls.VersionTLS12,
+        },
+    }
+    defaultHTTPClient = &http.Client{
+        Transport: defaultTransport,
+    }
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go
new file mode 100644
index 00000000..cadaef3d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go
@@ -0,0 +1,9 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+// Package streaming contains helpers for streaming IO operations and progress reporting.
+package streaming
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
new file mode 100644
index 00000000..fbcd4831
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
@@ -0,0 +1,75 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package streaming
+
+import (
+    "io"
+
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+)
+
+type progress struct {
+    rc     io.ReadCloser
+    rsc    io.ReadSeekCloser
+    pr     func(bytesTransferred int64)
+    offset int64
+}
+
+// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
+// In addition to adding a Close method to an io.ReadSeeker, this can also be used to wrap an
+// io.ReadSeekCloser with a no-op Close method to allow explicit control of when the io.ReadSeekCloser
+// has its underlying stream closed.
+func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
+    return exported.NopCloser(rs)
+}
+
+// NewRequestProgress adds progress reporting to an HTTP request's body stream.
+func NewRequestProgress(body io.ReadSeekCloser, pr func(bytesTransferred int64)) io.ReadSeekCloser {
+    return &progress{
+        rc:     body,
+        rsc:    body,
+        pr:     pr,
+        offset: 0,
+    }
+}
+
+// NewResponseProgress adds progress reporting to an HTTP response's body stream.
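+//
+// A sketch of wrapping a response body (resp is an *http.Response; the callback
+// receives the running total of bytes read):
+//
+//    resp.Body = NewResponseProgress(resp.Body, func(bytesTransferred int64) {
+//        fmt.Printf("read %d bytes\n", bytesTransferred)
+//    })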
+func NewResponseProgress(body io.ReadCloser, pr func(bytesTransferred int64)) io.ReadCloser {
+    return &progress{
+        rc:     body,
+        rsc:    nil,
+        pr:     pr,
+        offset: 0,
+    }
+}
+
+// Read reads a block of data from an inner stream and reports progress
+func (p *progress) Read(b []byte) (n int, err error) {
+    n, err = p.rc.Read(b)
+    if err != nil && err != io.EOF {
+        return
+    }
+    p.offset += int64(n)
+    // Invokes the user's callback method to report progress
+    p.pr(p.offset)
+    return
+}
+
+// Seek is expected to be called only with a zero offset from the beginning of the stream.
+func (p *progress) Seek(offset int64, whence int) (int64, error) {
+    // This should only ever be called with offset = 0 and whence = io.SeekStart
+    n, err := p.rsc.Seek(offset, whence)
+    if err == nil {
+        p.offset = int64(n)
+    }
+    return n, err
+}
+
+// Close closes the underlying stream; wrap a stream that must stay open with NopCloser
+// before adding progress reporting.
+func (p *progress) Close() error {
+    return p.rc.Close()
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go
new file mode 100644
index 00000000..80282d4a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go
@@ -0,0 +1,41 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package tracing
+
+// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span will be processed and visualized by various backends.
+type SpanKind int
+
+const (
+    // SpanKindInternal indicates the span represents an internal operation within an application.
+    SpanKindInternal SpanKind = 1
+
+    // SpanKindServer indicates the span covers server-side handling of a request.
+    SpanKindServer SpanKind = 2
+
+    // SpanKindClient indicates the span describes a request to a remote service.
+    SpanKindClient SpanKind = 3
+
+    // SpanKindProducer indicates the span was created by a messaging producer.
+    SpanKindProducer SpanKind = 4
+
+    // SpanKindConsumer indicates the span was created by a messaging consumer.
+    SpanKindConsumer SpanKind = 5
+)
+
+// SpanStatus represents the status of a span.
+type SpanStatus int
+
+const (
+    // SpanStatusUnset is the default status code.
+    SpanStatusUnset SpanStatus = 0
+
+    // SpanStatusError indicates the operation contains an error.
+    SpanStatusError SpanStatus = 1
+
+    // SpanStatusOK indicates the operation completed successfully.
+    SpanStatusOK SpanStatus = 2
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go
new file mode 100644
index 00000000..75f757ce
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go
@@ -0,0 +1,168 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// Package tracing contains the definitions needed to support distributed tracing.
+package tracing
+
+import (
+    "context"
+)
+
+// ProviderOptions contains the optional values when creating a Provider.
+type ProviderOptions struct {
+    // for future expansion
+}
+
+// NewProvider creates a new Provider with the specified values.
+// - newTracerFn is the underlying implementation for creating Tracer instances
+// - options contains optional values; pass nil to accept the default value
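+//
+// A sketch wiring in a tracing backend (newTracer is a hypothetical
+// func(name, version string) Tracer supplied by that backend):
+//
+//    provider := NewProvider(newTracer, nil)
+//    tracer := provider.NewTracer("module", "v1.0.0")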
+func NewProvider(newTracerFn func(name, version string) Tracer, options *ProviderOptions) Provider {
+    return Provider{
+        newTracerFn: newTracerFn,
+    }
+}
+
+// Provider is the factory that creates Tracer instances.
+// It defaults to a no-op provider.
+type Provider struct {
+    newTracerFn func(name, version string) Tracer
+}
+
+// NewTracer creates a new Tracer for the specified name and version.
+// - name - the name of the tracer object, typically the fully qualified name of the service client
+// - version - the version of the module in which the service client resides
+func (p Provider) NewTracer(name, version string) (tracer Tracer) {
+    if p.newTracerFn != nil {
+        tracer = p.newTracerFn(name, version)
+    }
+    return
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// TracerOptions contains the optional values when creating a Tracer.
+type TracerOptions struct {
+    // for future expansion
+}
+
+// NewTracer creates a Tracer with the specified values.
+// - newSpanFn is the underlying implementation for creating Span instances
+// - options contains optional values; pass nil to accept the default value
+func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer {
+    return Tracer{
+        newSpanFn: newSpanFn,
+    }
+}
+
+// Tracer is the factory that creates Span instances.
+type Tracer struct {
+    newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span)
+}
+
+// Start creates a new span and a context.Context that contains it.
+// - ctx is the parent context for this span. If it contains a Span, the newly created span will be a child of that span, else it will be a root span
+// - spanName identifies the span within a trace, it's typically the fully qualified API name
+// - options contains optional values for the span, pass nil to accept any defaults
+func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) {
+    if t.newSpanFn != nil {
+        return t.newSpanFn(ctx, spanName, options)
+    }
+    return ctx, Span{}
+}
+
+// SpanOptions contains optional settings for creating a span.
+type SpanOptions struct {
+    // Kind indicates the kind of Span.
+    Kind SpanKind
+
+    // Attributes contains key-value pairs of attributes for the span.
+    Attributes []Attribute
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// SpanImpl abstracts the underlying implementation for Span,
+// allowing it to work with various tracing implementations.
+// Any zero-values will have their default, no-op behavior.
+type SpanImpl struct {
+    // End contains the implementation for the Span.End method.
+    End func()
+
+    // SetAttributes contains the implementation for the Span.SetAttributes method.
+    SetAttributes func(...Attribute)
+
+    // AddEvent contains the implementation for the Span.AddEvent method.
+    AddEvent func(string, ...Attribute)
+
+    // AddError contains the implementation for the Span.AddError method.
+    AddError func(err error)
+
+    // SetStatus contains the implementation for the Span.SetStatus method.
+    SetStatus func(SpanStatus, string)
+}
+
+// NewSpan creates a Span with the specified implementation.
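+//
+// A sketch bridging Span to a hypothetical backend span object:
+//
+//    span := NewSpan(SpanImpl{
+//        End:      func() { backendSpan.End() },
+//        AddError: func(err error) { backendSpan.RecordError(err) },
+//    })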
+func NewSpan(impl SpanImpl) Span {
+    return Span{
+        impl: impl,
+    }
+}
+
+// Span is a single unit of a trace. A trace can contain multiple spans.
+// A zero-value Span provides a no-op implementation.
+type Span struct {
+    impl SpanImpl
+}
+
+// End terminates the span and MUST be called before the span leaves scope.
+// Any further updates to the span will be ignored after End is called.
+func (s Span) End() {
+    if s.impl.End != nil {
+        s.impl.End()
+    }
+}
+
+// SetAttributes sets the specified attributes on the Span.
+// Any existing attributes with the same keys will have their values overwritten.
+func (s Span) SetAttributes(attrs ...Attribute) {
+    if s.impl.SetAttributes != nil {
+        s.impl.SetAttributes(attrs...)
+    }
+}
+
+// AddEvent adds a named event with an optional set of attributes to the span.
+func (s Span) AddEvent(name string, attrs ...Attribute) {
+    if s.impl.AddEvent != nil {
+        s.impl.AddEvent(name, attrs...)
+    }
+}
+
+// AddError adds the specified error event to the span.
+func (s Span) AddError(err error) {
+    if s.impl.AddError != nil {
+        s.impl.AddError(err)
+    }
+}
+
+// SetStatus sets the status on the span along with a description.
+func (s Span) SetStatus(code SpanStatus, desc string) {
+    if s.impl.SetStatus != nil {
+        s.impl.SetStatus(code, desc)
+    }
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Attribute is a key-value pair.
+type Attribute struct {
+    // Key is the name of the attribute.
+    Key string
+
+    // Value is the attribute's value.
+    // Types that are natively supported include int64, float64, int, bool, string.
+    // Any other type will be formatted per rules of fmt.Sprintf("%v").
+    Value any
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
new file mode 100644
index 00000000..cc8034cf
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
@@ -0,0 +1,409 @@
+# Release History
+
+## 1.3.0 (2023-05-09)
+
+### Breaking Changes
+> These changes affect only code written against a beta version such as v1.3.0-beta.5
+* Renamed `NewOnBehalfOfCredentialFromCertificate` to `NewOnBehalfOfCredentialWithCertificate`
+* Renamed `NewOnBehalfOfCredentialFromSecret` to `NewOnBehalfOfCredentialWithSecret`
+
+### Other Changes
+* Upgraded to MSAL v1.0.0
+
+## 1.3.0-beta.5 (2023-04-11)
+
+### Breaking Changes
+> These changes affect only code written against a beta version such as v1.3.0-beta.4
+* Moved `NewWorkloadIdentityCredential()` parameters into `WorkloadIdentityCredentialOptions`.
+  The constructor now reads default configuration from environment variables set by the Azure
+  workload identity webhook by default.
+  ([#20478](https://github.com/Azure/azure-sdk-for-go/pull/20478))
+* Removed CAE support. It will return in v1.4.0-beta.1
+  ([#20479](https://github.com/Azure/azure-sdk-for-go/pull/20479))
+
+### Bugs Fixed
+* Fixed an issue in `DefaultAzureCredential` that could cause the managed identity endpoint check to fail in rare circumstances.
+
+## 1.3.0-beta.4 (2023-03-08)
+
+### Features Added
+* Added `WorkloadIdentityCredentialOptions.AdditionallyAllowedTenants` and `.DisableInstanceDiscovery`
+
+### Bugs Fixed
+* Credentials now synchronize within `GetToken()` so a single instance can be shared among goroutines
+  ([#20044](https://github.com/Azure/azure-sdk-for-go/issues/20044))
+
+### Other Changes
+* Upgraded dependencies
+
+## 1.2.2 (2023-03-07)
+
+### Other Changes
+* Upgraded dependencies
+
+## 1.3.0-beta.3 (2023-02-07)
+
+### Features Added
+* By default, credentials set client capability "CP1" to enable support for
+  [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation).
+  This indicates to Azure Active Directory that your application can handle CAE claims challenges.
+  You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true".
+* `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login
+  prompt with a username ([#15599](https://github.com/Azure/azure-sdk-for-go/pull/15599))
+* Service principal and user credentials support ADFS authentication on Azure Stack.
+  Specify "adfs" as the credential's tenant.
+* Applications running in private or disconnected clouds can prevent credentials from
+  requesting Azure AD instance metadata by setting the `DisableInstanceDiscovery`
+  field on credential options.
+* Many credentials can now be configured to authenticate in multiple tenants. The
+  options types for these credentials have an `AdditionallyAllowedTenants` field
+  that specifies additional tenants in which the credential may authenticate.
+
+## 1.3.0-beta.2 (2023-01-10)
+
+### Features Added
+* Added `OnBehalfOfCredential` to support the on-behalf-of flow
+  ([#16642](https://github.com/Azure/azure-sdk-for-go/issues/16642))
+
+### Bugs Fixed
+* `AzureCLICredential` reports token expiration in local time (should be UTC)
+
+### Other Changes
+* `AzureCLICredential` imposes its default timeout only when the `Context`
+  passed to `GetToken()` has no deadline
+* Added `NewCredentialUnavailableError()`. This function constructs an error indicating
+  a credential can't authenticate and an encompassing `ChainedTokenCredential` should
+  try its next credential, if any.
+
+## 1.3.0-beta.1 (2022-12-13)
+
+### Features Added
+* `WorkloadIdentityCredential` and `DefaultAzureCredential` support
+  Workload Identity Federation on Kubernetes. `DefaultAzureCredential`
+  support requires environment variable configuration as set by the
+  Workload Identity webhook.
+  ([#15615](https://github.com/Azure/azure-sdk-for-go/issues/15615))
+
+## 1.2.0 (2022-11-08)
+
+### Other Changes
+* This version includes all fixes and features from 1.2.0-beta.*
+
+## 1.2.0-beta.3 (2022-10-11)
+
+### Features Added
+* `ManagedIdentityCredential` caches tokens in memory
+
+### Bugs Fixed
+* `ClientCertificateCredential` sends only the leaf cert for SNI authentication
+
+## 1.2.0-beta.2 (2022-08-10)
+
+### Features Added
+* Added `ClientAssertionCredential` to enable applications to authenticate
+  with custom client assertions
+
+### Other Changes
+* Updated AuthenticationFailedError with links to TROUBLESHOOTING.md for relevant errors
+* Upgraded `microsoft-authentication-library-for-go` requirement to v0.6.0
+
+## 1.2.0-beta.1 (2022-06-07)
+
+### Features Added
+* `EnvironmentCredential` reads certificate passwords from `AZURE_CLIENT_CERTIFICATE_PASSWORD`
+  ([#17099](https://github.com/Azure/azure-sdk-for-go/pull/17099))
+
+## 1.1.0 (2022-06-07)
+
+### Features Added
+* `ClientCertificateCredential` and `ClientSecretCredential` support ESTS-R. First-party
+  applications can set environment variable `AZURE_REGIONAL_AUTHORITY_NAME` with a
+  region name.
+  ([#15605](https://github.com/Azure/azure-sdk-for-go/issues/15605))
+
+## 1.0.1 (2022-06-07)
+
+### Other Changes
+* Upgrade `microsoft-authentication-library-for-go` requirement to v0.5.1
+  ([#18176](https://github.com/Azure/azure-sdk-for-go/issues/18176))
+
+## 1.0.0 (2022-05-12)
+
+### Features Added
+* `DefaultAzureCredential` reads environment variable `AZURE_CLIENT_ID` for the
+  client ID of a user-assigned managed identity
+  ([#17293](https://github.com/Azure/azure-sdk-for-go/pull/17293))
+
+### Breaking Changes
+* Removed `AuthorizationCodeCredential`. Use `InteractiveBrowserCredential` instead
+  to authenticate a user with the authorization code flow.
+* Instances of `AuthenticationFailedError` are now returned by pointer.
+* `GetToken()` returns `azcore.AccessToken` by value
+
+### Bugs Fixed
+* `AzureCLICredential` panics after receiving an unexpected error type
+  ([#17490](https://github.com/Azure/azure-sdk-for-go/issues/17490))
+
+### Other Changes
+* `GetToken()` returns an error when the caller specifies no scope
+* Updated to the latest versions of `golang.org/x/crypto`, `azcore` and `internal`
+
+## 0.14.0 (2022-04-05)
+
+### Breaking Changes
+* This module now requires Go 1.18
+* Removed `AuthorityHost`. Credentials are now configured for sovereign or private
+  clouds with the API in `azcore/cloud`, for example:
+  ```go
+  // before
+  opts := azidentity.ClientSecretCredentialOptions{AuthorityHost: azidentity.AzureGovernment}
+  cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts)
+
+  // after
+  import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+
+  opts := azidentity.ClientSecretCredentialOptions{}
+  opts.Cloud = cloud.AzureGovernment
+  cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts)
+  ```
+
+## 0.13.2 (2022-03-08)
+
+### Bugs Fixed
+* Prevented a data race in `DefaultAzureCredential` and `ChainedTokenCredential`
+  ([#17144](https://github.com/Azure/azure-sdk-for-go/issues/17144))
+
+### Other Changes
+* Upgraded App Service managed identity version from 2017-09-01 to 2019-08-01
+  ([#17086](https://github.com/Azure/azure-sdk-for-go/pull/17086))
+
+## 0.13.1 (2022-02-08)
+
+### Features Added
+* `EnvironmentCredential` supports certificate SNI authentication when
+  `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN` is "true".
+  ([#16851](https://github.com/Azure/azure-sdk-for-go/pull/16851))
+
+### Bugs Fixed
+* `ManagedIdentityCredential.GetToken()` now returns an error when configured for
+  a user assigned identity in Azure Cloud Shell (which doesn't support such identities)
+  ([#16946](https://github.com/Azure/azure-sdk-for-go/pull/16946))
+
+### Other Changes
+* `NewDefaultAzureCredential()` logs non-fatal errors. These errors are also included in the
+  error returned by `DefaultAzureCredential.GetToken()` when it's unable to acquire a token
+  from any source. ([#15923](https://github.com/Azure/azure-sdk-for-go/issues/15923))
+
+## 0.13.0 (2022-01-11)
+
+### Breaking Changes
+* Replaced `AuthenticationFailedError.RawResponse()` with a field having the same name
+* Unexported `CredentialUnavailableError`
+* Instances of `ChainedTokenCredential` will now skip looping through the list of source credentials and re-use the first successful credential on subsequent calls to `GetToken`.
+  * If `ChainedTokenCredentialOptions.RetrySources` is true, `ChainedTokenCredential` will continue to try all of the originally provided credentials each time the `GetToken` method is called.
+  * `ChainedTokenCredential.successfulCredential` will contain a reference to the last successful credential.
+  * `DefaultAzureCredential` will also re-use the first successful credential on subsequent calls to `GetToken`.
+  * `DefaultAzureCredential.chain.successfulCredential` will also contain a reference to the last successful credential.
+
+### Other Changes
+* `ManagedIdentityCredential` no longer probes IMDS before requesting a token
+  from it. Also, an error response from IMDS no longer disables a credential
+  instance. Following an error, a credential instance will continue to send
+  requests to IMDS as necessary.
+* Adopted MSAL for user and service principal authentication
+* Updated `azcore` requirement to 0.21.0
+
+## 0.12.0 (2021-11-02)
+### Breaking Changes
+* Raised minimum go version to 1.16
+* Removed `NewAuthenticationPolicy()` from credentials. Clients should instead use azcore's
+  `runtime.NewBearerTokenPolicy()` to construct a bearer token authorization policy.
+* The `AuthorityHost` field in credential options structs is now a custom type,
+  `AuthorityHost`, with underlying type `string`
+* `NewChainedTokenCredential` has a new signature to accommodate a placeholder
+  options struct:
+  ```go
+  // before
+  cred, err := NewChainedTokenCredential(credA, credB)
+
+  // after
+  cred, err := NewChainedTokenCredential([]azcore.TokenCredential{credA, credB}, nil)
+  ```
+* Removed `ExcludeAzureCLICredential`, `ExcludeEnvironmentCredential`, and `ExcludeMSICredential`
+  from `DefaultAzureCredentialOptions`
+* `NewClientCertificateCredential` requires a `[]*x509.Certificate` and `crypto.PrivateKey` instead of
+  a path to a certificate file. Added `ParseCertificates` to simplify getting these in common cases:
+  ```go
+  // before
+  cred, err := NewClientCertificateCredential("tenant", "client-id", "/cert.pem", nil)
+
+  // after
+  certData, err := os.ReadFile("/cert.pem")
+  certs, key, err := ParseCertificates(certData, password)
+  cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, nil)
+  ```
+* Removed `InteractiveBrowserCredentialOptions.ClientSecret` and `.Port`
+* Removed `AADAuthenticationFailedError`
+* Removed `id` parameter of `NewManagedIdentityCredential()`. User assigned identities are now
+  specified by `ManagedIdentityCredentialOptions.ID`:
+  ```go
+  // before
+  cred, err := NewManagedIdentityCredential("client-id", nil)
+  // or, for a resource ID
+  opts := &ManagedIdentityCredentialOptions{ID: ResourceID}
+  cred, err := NewManagedIdentityCredential("/subscriptions/...", opts)
+
+  // after
+  clientID := ClientID("7cf7db0d-...")
+  opts := &ManagedIdentityCredentialOptions{ID: clientID}
+  // or, for a resource ID
+  resID := ResourceID("/subscriptions/...")
+  opts := &ManagedIdentityCredentialOptions{ID: resID}
+  cred, err := NewManagedIdentityCredential(opts)
+  ```
+* `DeviceCodeCredentialOptions.UserPrompt` has a new type: `func(context.Context, DeviceCodeMessage) error`
+* Credential options structs now embed `azcore.ClientOptions`. In addition to changing literal initialization
+  syntax, this change renames `HTTPClient` fields to `Transport`.
+* Renamed `LogCredential` to `EventCredential`
+* `AzureCLICredential` no longer reads the environment variable `AZURE_CLI_PATH`
+* `NewManagedIdentityCredential` no longer reads environment variables `AZURE_CLIENT_ID` and
+  `AZURE_RESOURCE_ID`. Use `ManagedIdentityCredentialOptions.ID` instead.
+* Unexported `AuthenticationFailedError` and `CredentialUnavailableError` structs. In their place are two
+  interfaces having the same names.
+
+### Bugs Fixed
+* `AzureCLICredential.GetToken` no longer mutates its `opts.Scopes`
+
+### Features Added
+* Added connection configuration options to `DefaultAzureCredentialOptions`
+* `AuthenticationFailedError.RawResponse()` returns the HTTP response motivating the error,
+  if available
+
+### Other Changes
+* `NewDefaultAzureCredential()` returns `*DefaultAzureCredential` instead of `*ChainedTokenCredential`
+* Added `TenantID` field to `DefaultAzureCredentialOptions` and `AzureCLICredentialOptions`
+
+## 0.11.0 (2021-09-08)
+### Breaking Changes
+* Unexported `AzureCLICredentialOptions.TokenProvider` and its type,
+  `AzureCLITokenProvider`
+
+### Bug Fixes
+* `ManagedIdentityCredential.GetToken` returns `CredentialUnavailableError`
+  when IMDS has no assigned identity, signaling `DefaultAzureCredential` to
+  try other credentials
+
+## 0.10.0 (2021-08-30)
+### Breaking Changes
+* Update based on `azcore` refactor [#15383](https://github.com/Azure/azure-sdk-for-go/pull/15383)
+
+## 0.9.3 (2021-08-20)
+
+### Bugs Fixed
+* `ManagedIdentityCredential.GetToken` no longer mutates its `opts.Scopes`
+
+### Other Changes
+* Bumps version of `azcore` to `v0.18.1`
+
+## 0.9.2 (2021-07-23)
+### Features Added
+* Adding support for Service Fabric environment in `ManagedIdentityCredential`
+* Adding an option for using a resource ID instead of client ID in `ManagedIdentityCredential`
+
+## 0.9.1 (2021-05-24)
+### Features Added
+* Add LICENSE.txt and bump version information
+
+## 0.9.0 (2021-05-21)
+### Features Added
+* Add support for authenticating in Azure Stack environments
+* Enable user assigned identities for the IMDS scenario in `ManagedIdentityCredential`
+* Add scope to resource conversion in `GetToken()` on `ManagedIdentityCredential`
+
+## 0.8.0 (2021-01-20)
+### Features Added
+* Updating documentation
+
+## 0.7.1 (2021-01-04)
+### Features Added
+* Adding port option to `InteractiveBrowserCredential`
+
+## 0.7.0 (2020-12-11)
+### Features Added
+* Add `redirectURI` parameter back to authentication code flow
+
+## 0.6.1 (2020-12-09)
+### Features Added
+* Updating query parameter in `ManagedIdentityCredential` and updating datetime string for parsing managed identity access tokens.
+
+## 0.6.0 (2020-11-16)
+### Features Added
+* Remove `RedirectURL` parameter from auth code flow to align with the MSAL implementation which relies on the native client redirect URL.
+
+## 0.5.0 (2020-10-30)
+### Features Added
+* Flattening credential options
+
+## 0.4.3 (2020-10-21)
+### Features Added
+* Adding Azure Arc support in `ManagedIdentityCredential`
+
+## 0.4.2 (2020-10-16)
+### Features Added
+* Typo fixes
+
+## 0.4.1 (2020-10-16)
+### Features Added
+* Ensure authority hosts are only HTTPs
+
+## 0.4.0 (2020-10-16)
+### Features Added
+* Adding options structs for credentials
+
+## 0.3.0 (2020-10-09)
+### Features Added
+* Update `DeviceCodeCredential` callback
+
+## 0.2.2 (2020-10-09)
+### Features Added
+* Add `AuthorizationCodeCredential`
+
+## 0.2.1 (2020-10-06)
+### Features Added
+* Add `InteractiveBrowserCredential`
+
+## 0.2.0 (2020-09-11)
+### Features Added
+* Refactor `azidentity` on top of `azcore` refactor
+* Updated policies to conform to `policy.Policy` interface changes.
+* Updated non-retriable errors to conform to `azcore.NonRetriableError`.
+* Fixed calls to `Request.SetBody()` to include content type.
+* Switched endpoints to string types and removed extra parsing code.
+
+## 0.1.1 (2020-09-02)
+### Features Added
+* Add `AzureCLICredential` to `DefaultAzureCredential` chain
+
+## 0.1.0 (2020-07-23)
+### Features Added
+* Initial Release. Azure Identity library that provides Azure Active Directory token authentication support for the SDK.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt
new file mode 100644
index 00000000..48ea6616
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) Microsoft Corporation.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
new file mode 100644
index 00000000..4ac53eb7
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
@@ -0,0 +1,307 @@
+# Migrating from autorest/adal to azidentity
+
+`azidentity` provides Azure Active Directory (Azure AD) authentication for the newest Azure SDK modules (`github.com/Azure/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/Azure/azure-sdk-for-go/services/...`) use types from `github.com/Azure/go-autorest/autorest/adal` instead.
+
+This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`.
+
+## Table of contents
+
+- [Acquire a token](#acquire-a-token)
+- [Client certificate authentication](#client-certificate-authentication)
+- [Client secret authentication](#client-secret-authentication)
+- [Configuration](#configuration)
+- [Device code authentication](#device-code-authentication)
+- [Managed identity](#managed-identity)
+- [Use azidentity credentials with older packages](#use-azidentity-credentials-with-older-packages)
+
+## Configuration
+
+### `autorest/adal`
+
+Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires an Azure AD endpoint and tenant:
+
+```go
+import "github.com/Azure/go-autorest/autorest/adal"
+
+oauthCfg, err := adal.NewOAuthConfig("https://login.chinacloudapi.cn", tenantID)
+handle(err)
+
+spt, err := adal.NewServicePrincipalTokenWithSecret(
+    *oauthCfg, clientID, "https://management.chinacloudapi.cn/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret},
+)
+```
+
+### `azidentity`
+
+A credential instance can acquire tokens for any audience. The audience for each token is determined by the client requesting it. Credentials require endpoint configuration only for sovereign or private clouds. The `azcore/cloud` package has predefined configuration for sovereign clouds such as Azure China:
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+)
+
+clientOpts := azcore.ClientOptions{Cloud: cloud.AzureChina}
+
+cred, err := azidentity.NewClientSecretCredential(
+    tenantID, clientID, secret, &azidentity.ClientSecretCredentialOptions{ClientOptions: clientOpts},
+)
+handle(err)
+```
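+
+For example, a single credential instance can serve requests for different audiences; this sketch uses the `GetToken` API shown under [Acquire a token](#acquire-a-token) to make the audiences explicit (`tenantID`, `clientID`, `secret`, and `handle` as in the other examples):
+
+```go
+cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
+handle(err)
+
+// the same credential yields tokens for different audiences on request
+armTk, err := cred.GetToken(
+    context.TODO(), policy.TokenRequestOptions{Scopes: []string{"https://management.azure.com/.default"}},
+)
+handle(err)
+vaultTk, err := cred.GetToken(
+    context.TODO(), policy.TokenRequestOptions{Scopes: []string{"https://vault.azure.net/.default"}},
+)
+handle(err)
+```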
+
+## Client secret authentication
+
+### `autorest/adal`
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/adal"
+)
+
+oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
+handle(err)
+spt, err := adal.NewServicePrincipalTokenWithSecret(
+    *oauthCfg, clientID, "https://management.azure.com/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret},
+)
+handle(err)
+
+client := subscriptions.NewClient()
+client.Authorizer = autorest.NewBearerAuthorizer(spt)
+```
+
+### `azidentity`
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
+)
+
+cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
+handle(err)
+
+client, err := armsubscriptions.NewClient(cred, nil)
+handle(err)
+```
+
+## Client certificate authentication
+
+### `autorest/adal`
+
+```go
+import (
+    "os"
+
+    "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/adal"
+)
+
+certData, err := os.ReadFile("./example.pfx")
+handle(err)
+
+certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
+handle(err)
+
+oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
+handle(err)
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+    *oauthCfg, clientID, certificate, rsaPrivateKey, "https://management.azure.com/",
+)
+handle(err)
+
+client := subscriptions.NewClient()
+client.Authorizer = autorest.NewBearerAuthorizer(spt)
+```
+
+### `azidentity`
+
+```go
+import (
+    "os"
+
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
+)
+
+certData, err := os.ReadFile("./example.pfx")
+handle(err)
+
+certs, key, err := azidentity.ParseCertificates(certData, nil)
+handle(err)
+
+cred, err := azidentity.NewClientCertificateCredential(tenantID, clientID, certs, key, nil)
+handle(err)
+
+client, err := armsubscriptions.NewClient(cred, nil)
+handle(err)
+```
+
+## Managed identity
+
+### `autorest/adal`
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/adal"
+)
+
+spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/", nil)
+handle(err)
+
+client := subscriptions.NewClient()
+client.Authorizer = autorest.NewBearerAuthorizer(spt)
+```
+
+### `azidentity`
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
+)
+
+cred, err := azidentity.NewManagedIdentityCredential(nil)
+handle(err)
+
+client, err := armsubscriptions.NewClient(cred, nil)
+handle(err)
+```
+
+### User-assigned identities
+
+`autorest/adal`:
+
+```go
+import "github.com/Azure/go-autorest/autorest/adal"
+
+opts := &adal.ManagedIdentityOptions{ClientID: "..."}
+spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/", opts)
+handle(err)
+```
+
+`azidentity`:
+
+```go
+import "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+
+opts := azidentity.ManagedIdentityCredentialOptions{ID: azidentity.ClientID("...")}
+cred, err := azidentity.NewManagedIdentityCredential(&opts)
+handle(err)
+```
+
+## Device code authentication
+
+### `autorest/adal`
+
+```go
+import (
+    "fmt"
+    "net/http"
+
+    "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/adal"
+)
+
+oauthClient := &http.Client{}
+oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
+handle(err)
+resource := "https://management.azure.com/"
+deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthCfg, clientID, resource)
+handle(err)
+
+// display instructions, wait for the user to authenticate
+fmt.Println(*deviceCode.Message)
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+handle(err)
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(*oauthCfg, clientID, resource, *token)
+handle(err)
+
+client := subscriptions.NewClient()
+client.Authorizer = autorest.NewBearerAuthorizer(spt)
+```
+
+### `azidentity`
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
+)
+
+cred, err := azidentity.NewDeviceCodeCredential(nil)
+handle(err)
+
+client, err := armsubscriptions.NewClient(cred, nil)
+handle(err)
+```
+
+`azidentity.DeviceCodeCredential` will guide a user through authentication, printing instructions to the console by default. The user prompt is customizable. For more information, see the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential).
+
+## Acquire a token
+
+### `autorest/adal`
+
+```go
+import "github.com/Azure/go-autorest/autorest/adal"
+
+oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
+handle(err)
+
+spt, err := adal.NewServicePrincipalTokenWithSecret(
+    *oauthCfg, clientID, "https://vault.azure.net", &adal.ServicePrincipalTokenSecret{ClientSecret: secret},
+)
+handle(err)
+
+err = spt.Refresh()
+if err == nil {
+    token := spt.Token
+}
+```
+
+### `azidentity`
+
+In ordinary usage, application code doesn't need to request tokens from credentials directly. Azure SDK clients handle token acquisition and refreshing internally. However, applications may call `GetToken()` to do so. All credential types have this method.
+
+```go
+import (
+    "context"
+
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+)
+
+cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
+handle(err)
+
+tk, err := cred.GetToken(
+    context.TODO(), policy.TokenRequestOptions{Scopes: []string{"https://vault.azure.net/.default"}},
+)
+if err == nil {
+    token := tk.Token
+}
+```
+
+Note that `azidentity` credentials use the Azure AD v2.0 endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see the [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent).
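+
+As a rule of thumb, an `adal` resource identifier becomes an `azidentity` scope by appending "/.default" (a sketch, not an SDK API):
+
+```go
+// resource (v1): https://vault.azure.net
+// scope (v2):    https://vault.azure.net/.default
+scope := strings.TrimSuffix(resource, "/") + "/.default"
+```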
+
+## Use azidentity credentials with older packages
+
+The [azidext module](https://pkg.go.dev/github.com/jongio/azidext/go/azidext) provides an adapter for `azidentity` credential types. The adapter enables using the credential types with older Azure SDK clients. For example:
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+    "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+    "github.com/jongio/azidext/go/azidext"
+)
+
+cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
+handle(err)
+
+client := subscriptions.NewClient()
+client.Authorizer = azidext.NewTokenCredentialAdapter(cred, []string{"https://management.azure.com//.default"})
+```
+
+![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FMIGRATION.png)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
new file mode 100644
index 00000000..da0baa9a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
@@ -0,0 +1,243 @@
+# Azure Identity Client Module for Go
+
+The Azure Identity module provides Azure Active Directory (Azure AD) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication.
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity)
+| [Azure Active Directory documentation](https://docs.microsoft.com/azure/active-directory/)
+| [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity)
+
+# Getting started
+
+## Install the module
+
+This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management.
+
+Install the Azure Identity module:
+
+```sh
+go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity
+```
+
+## Prerequisites
+
+- an [Azure subscription](https://azure.microsoft.com/free/)
+- Go 1.18
+
+### Authenticating during local development
+
+When debugging and executing code locally, developers typically use their own accounts to authenticate calls to Azure services. The `azidentity` module supports authenticating through developer tools to simplify local development.
+
+#### Authenticating via the Azure CLI
+
+`DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user.
+
+When no default browser is available, `az login` will use the device code authentication flow. This can also be selected manually by running `az login --use-device-code`.
+
+## Key concepts
+
+### Credentials
+
+A credential is a type which contains or can obtain the data needed for a service client to authenticate requests. Service clients across the Azure SDK accept a credential instance when they are constructed, and use that credential to authenticate requests.
+
+The `azidentity` module focuses on OAuth authentication with Azure Active Directory (AAD).
+It offers a variety of credential types capable of acquiring an Azure AD access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types.
+
+### DefaultAzureCredential
+
+`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds:
+
+![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg)
+
+1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate.
+1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity.
+1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it.
+1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity.
+
+> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types.
+
+## Managed Identity
+
+`DefaultAzureCredential` and `ManagedIdentityCredential` support [managed identity authentication](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) in any hosting environment which supports managed identities, such as (this list is not exhaustive):
+
+* [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity)
+* [Azure Arc](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)
+* [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/msi-authorization)
+* [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/use-managed-identity)
+* [Azure Service Fabric](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)
+* [Azure Virtual Machines](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token)
+
+## Examples
+
+- [Authenticate with DefaultAzureCredential](#authenticate-with-defaultazurecredential "Authenticate with DefaultAzureCredential")
+- [Define a custom authentication flow with ChainedTokenCredential](#define-a-custom-authentication-flow-with-chainedtokencredential "Define a custom authentication flow with ChainedTokenCredential")
+- [Specify a user-assigned managed identity for DefaultAzureCredential](#specify-a-user-assigned-managed-identity-for-defaultazurecredential)
+
+### Authenticate with DefaultAzureCredential
+
+This example demonstrates authenticating a client from the `armresources` module with `DefaultAzureCredential`.
+ +```go +cred, err := azidentity.NewDefaultAzureCredential(nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", cred, nil) +``` + +### Specify a user-assigned managed identity for DefaultAzureCredential + +To configure `DefaultAzureCredential` to authenticate a user-assigned managed identity, set the environment variable `AZURE_CLIENT_ID` to the identity's client ID. + +### Define a custom authentication flow with `ChainedTokenCredential` + +`DefaultAzureCredential` is generally the quickest way to get started developing apps for Azure. For more advanced scenarios, `ChainedTokenCredential` links multiple credential instances to be tried sequentially when authenticating. It will try each chained credential in turn until one provides a token or fails to authenticate due to an error. + +The following example demonstrates creating a credential, which will attempt to authenticate using managed identity. It will fall back to authenticating via the Azure CLI when a managed identity is unavailable. + +```go +managed, err := azidentity.NewManagedIdentityCredential(nil) +if err != nil { + // handle error +} +azCLI, err := azidentity.NewAzureCLICredential(nil) +if err != nil { + // handle error +} +chain, err := azidentity.NewChainedTokenCredential([]azcore.TokenCredential{managed, azCLI}, nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) +``` + +## Credential Types + +### Authenticating Azure Hosted Applications + +|Credential|Usage +|-|- +|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps +|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials +|[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables +|[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource +|[WorkloadIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential)|Authenticate a workload identity on Kubernetes + +### Authenticating Service Principals + +|Credential|Usage +|-|- +|[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion +|[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate +|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret + +### Authenticating Users + +|Credential|Usage +|-|- +|[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser +|[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI 
+|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password + +### Authenticating via Development Tools + +|Credential|Usage +|-|- +|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI + +## Environment Variables + +`DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. Each type of authentication requires values for specific variables: + +#### Service principal with secret + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_SECRET`|one of the application's client secrets + +#### Service principal with certificate + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key +|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any + +#### Username and password + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_USERNAME`|a username (usually an email address) +|`AZURE_PASSWORD`|that user's password + +Configuration is attempted in the above order. For example, if values for a +client secret and certificate are both present, the client secret will be used. + +## Troubleshooting + +### Error Handling + +Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot). + +For more details on handling specific Azure Active Directory errors please refer to the +Azure Active Directory +[error code documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes). + +### Logging + +This module uses the classification-based logging implementation in `azcore`. To enable console logging for all SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. Use the `azcore/log` package to control log event output or to enable logs for `azidentity` only. For example: +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + +Credentials log basic information only, such as `GetToken` success or failure and errors. These log entries don't contain authentication secrets but may contain sensitive information. + +## Next steps + +Client and management modules listed on the [Azure SDK releases page](https://azure.github.io/azure-sdk/releases/latest/go.html) support authenticating with `azidentity` credential types. You can learn more about using these libraries in their documentation, which is linked from the release page. + +## Provide Feedback + +If you encounter bugs or have suggestions, please +[open an issue](https://github.com/Azure/azure-sdk-for-go/issues). + +## Contributing + +This project welcomes contributions and suggestions. 
Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md new file mode 100644 index 00000000..7b7515eb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -0,0 +1,205 @@ +# Troubleshoot Azure Identity authentication issues + +This troubleshooting guide covers failure investigation techniques, common errors for the credential types in the `azidentity` module, and mitigation steps to resolve these errors. + +## Table of contents + +- [Handle azidentity errors](#handle-azidentity-errors) + - [Permission issues](#permission-issues) +- [Find relevant information in errors](#find-relevant-information-in-errors) +- [Enable and configure logging](#enable-and-configure-logging) +- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues) +- [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) +- [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) +- [Troubleshoot EnvironmentCredential authentication issues](#troubleshoot-environmentcredential-authentication-issues) +- [Troubleshoot ManagedIdentityCredential authentication issues](#troubleshoot-managedidentitycredential-authentication-issues) + - [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity) + - [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity) + - [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity) +- [Troubleshoot UsernamePasswordCredential authentication issues](#troubleshoot-usernamepasswordcredential-authentication-issues) +- [Troubleshoot WorkloadIdentityCredential authentication issues](#troubleshoot-workloadidentitycredential-authentication-issues) +- [Get additional help](#get-additional-help) + +## Handle azidentity errors + +Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. 
Authentication errors include a description of the failure and possibly an error message from Azure Active Directory (Azure AD). Depending on the application, these errors may or may not be recoverable. + +### Permission issues + +Service client errors with a status code of 401 or 403 often indicate that authentication succeeded but the caller doesn't have permission to access the specified API. Check the service documentation to determine which RBAC roles are needed for the request, and ensure the authenticated user or service principal has the appropriate role assignments. + +## Find relevant information in errors + +Authentication errors can include responses from Azure AD and often contain information helpful in diagnosis. Consider the following error message: + +``` +ClientSecretCredential authentication failed +POST https://login.microsoftonline.com/3c631bb7-a9f7-4343-a5ba-a615913/oauth2/v2.0/token +-------------------------------------------------------------------------------- +RESPONSE 401 Unauthorized +-------------------------------------------------------------------------------- +{ + "error": "invalid_client", + "error_description": "AADSTS7000215: Invalid client secret provided. Ensure the secret being sent in the request is the client secret value, not the client secret ID, for a secret added to app '86be4c01-505b-45e9-bfc0-9b825fd84'.\r\nTrace ID: 03da4b8e-5ffe-48ca-9754-aff4276f0100\r\nCorrelation ID: 7b12f9bb-2eef-42e3-ad75-eee69ec9088d\r\nTimestamp: 2022-03-02 18:25:26Z", + "error_codes": [ + 7000215 + ], + "timestamp": "2022-03-02 18:25:26Z", + "trace_id": "03da4b8e-5ffe-48ca-9754-aff4276f0100", + "correlation_id": "7b12f9bb-2eef-42e3-ad75-eee69ec9088d", + "error_uri": "https://login.microsoftonline.com/error?code=7000215" +} +-------------------------------------------------------------------------------- +``` + +This error contains several pieces of information: + +- __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. + +- __Azure AD Error Code and Message__: The error code and message returned by Azure AD. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes#aadsts-error-codes) has more information on AADSTS error codes. + +- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Azure AD failures. + +### Enable and configure logging + +`azidentity` provides the same logging capabilities as the rest of the Azure SDK. The simplest way to see the logs to help debug authentication issues is to print credential logs to the console. +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + +## Troubleshoot DefaultAzureCredential authentication issues + +| Error |Description| Mitigation | +|---|---|---| +|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
• [Enable logging](#enable-and-configure-logging) to get further diagnostic information.<br>• Consult the troubleshooting guide for underlying credential types for more information.<br>&nbsp;&nbsp;• [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues)<br>&nbsp;&nbsp;• [ManagedIdentityCredential](#troubleshoot-managedidentitycredential-authentication-issues)<br>&nbsp;&nbsp;• [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)| +|Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized), or 403 (Forbidden) status code|• [Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token.<br>• If an unexpected credential is returning a token, check application configuration such as environment variables.<br>• Ensure the correct role is assigned to the authenticated identity. For example, a service specific role rather than the subscription Owner role.| + +## Troubleshoot EnvironmentCredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Missing or incomplete environment variable configuration|A valid combination of environment variables wasn't set|Ensure the appropriate environment variables are set for the intended authentication method as described in the [module documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)| + + +## Troubleshoot ClientSecretCredential authentication issues + +| Error Code | Issue | Mitigation | +|---|---|---| +|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| + + +## Troubleshoot ClientCertificateCredential authentication issues + +| Error Code | Description | Mitigation | +|---|---|---| +|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| + + +## Troubleshoot UsernamePasswordCredential authentication issues + +| Error Code | Issue | Mitigation | +|---|---|---| +|AADSTS50126|The provided username or password is invalid.|Ensure the username and password provided to the credential constructor are valid.| + + +## Troubleshoot ManagedIdentityCredential authentication issues + +`ManagedIdentityCredential` is designed to work on a variety of Azure hosts that support managed identity. Configuration and troubleshooting vary from host to host.
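When one of the errors below appears, it's worth first confirming how the credential is constructed. The following is a minimal sketch, not part of the vendored module, assuming its `ManagedIdentityCredentialOptions.ID` field and `ClientID` type; `<client ID>` is a placeholder. + +```go +// system-assigned identity: pass nil to accept default options +sys, err := azidentity.NewManagedIdentityCredential(nil) +if err != nil { + // handle error +} + +// user-assigned identity: identify it explicitly, for example by client ID +user, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{ + ID: azidentity.ClientID("<client ID>"), +}) +if err != nil { + // handle error +} + +// pass either credential to a service client +_, _ = sys, user +```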
The table below lists the Azure hosts that can be assigned a managed identity and are supported by `ManagedIdentityCredential`. + +|Host Environment|Configuration|Troubleshooting| +|---|---|---| +|Azure Virtual Machines and Scale Sets|[Configuration](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)| +|Azure App Service and Azure Functions|[Configuration](https://docs.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)| +|Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)| +|Azure Arc|[Configuration](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)|| +|Azure Service Fabric|[Configuration](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)|| + +### Azure Virtual Machine managed identity + +| Error Message |Description| Mitigation | +|---|---|---| +|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.<br>If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).| +|The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error, 502 or 504 status code.|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`| +|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|• Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>• Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.| +|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|• Refer to the error message for more details on specific failures.<br>• Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>• Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.|
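These errors surface from `GetToken`. A quick way to reproduce them without a service client in the way is to request a token directly; a minimal sketch, not part of the vendored module, using an example ARM scope: + +```go +cred, err := azidentity.NewManagedIdentityCredential(nil) +if err != nil { + // handle error +} + +// on failure, err carries the IMDS error detail described in the table above +_, err = cred.GetToken(context.TODO(), policy.TokenRequestOptions{ + Scopes: []string{"https://management.azure.com/.default"}, +}) +if err != nil { + fmt.Println(err) +} +```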
#### Verify IMDS is available on the VM + +If you have access to the VM, you can use `curl` to verify the managed identity endpoint is available. + +```sh +curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://management.core.windows.net&api-version=2018-02-01' -H "Metadata: true" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure App Service and Azure Functions managed identity + +| Error Message |Description| Mitigation | +|---|---|---| +|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|• Ensure the App Service is configured for managed identity as described in [App Service documentation](https://docs.microsoft.com/azure/app-service/overview-managed-identity).<br>• Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.|
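Before trying the endpoint itself, a short in-process check can confirm whether the host populated the managed identity environment variables; a minimal sketch, not part of the vendored module (the variable names match the `curl` command below): + +```go +for _, name := range []string{"IDENTITY_ENDPOINT", "IDENTITY_HEADER"} { + if os.Getenv(name) == "" { + fmt.Printf("%s is not set\n", name) + } +} +```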
#### Verify the App Service managed identity endpoint is available + +If you can SSH into the App Service, you can verify managed identity is available in the environment. First ensure the environment variables `IDENTITY_ENDPOINT` and `IDENTITY_HEADER` are set. Then you can verify the managed identity endpoint is available using `curl`. + +```sh +curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-version=2019-08-01" -H "X-IDENTITY-HEADER: $IDENTITY_HEADER" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure Kubernetes Service managed identity + +#### Pod Identity + +| Error Message |Description| Mitigation | +|---|---|---| +|"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response).| + + +## Troubleshoot AzureCliCredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|• Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://docs.microsoft.com/cli/azure/install-azure-cli).<br>• Validate the installation location is in the application's `PATH` environment variable.| +|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|• Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://docs.microsoft.com/cli/azure/authenticate-azure-cli).<br>• Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.|
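To isolate CLI issues from the rest of a credential chain, construct `AzureCLICredential` directly and request a token; a minimal sketch, not part of the vendored module, using an example ARM scope: + +```go +cred, err := azidentity.NewAzureCLICredential(nil) +if err != nil { + // handle error +} + +// GetToken invokes "az account get-access-token"; CLI failures surface in err +_, err = cred.GetToken(context.TODO(), policy.TokenRequestOptions{ + Scopes: []string{"https://management.core.windows.net/.default"}, +}) +if err != nil { + fmt.Println(err) +} +```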
#### Verify the Azure CLI can obtain tokens + +You can manually verify that the Azure CLI can authenticate and obtain tokens. First, use the `account` command to verify the logged in account. + +```azurecli +az account show +``` + +Once you've verified the Azure CLI is using the correct account, you can validate that it's able to obtain tokens for that account. + +```azurecli +az account get-access-token --output json --resource https://management.core.windows.net +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + + +## Troubleshoot WorkloadIdentityCredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.<br>• If your application runs on Azure Kubernetes Service (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.<br>• If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions`.| + +## Get additional help + +Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json new file mode 100644 index 00000000..47e77f88 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/azidentity", + "Tag": "go/azidentity_6225ab0470" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go new file mode 100644 index 00000000..739ff49c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -0,0 +1,190 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "net/url" + "os" + "regexp" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const ( + azureAdditionallyAllowedTenants = "AZURE_ADDITIONALLY_ALLOWED_TENANTS" + azureAuthorityHost = "AZURE_AUTHORITY_HOST" + azureClientCertificatePassword = "AZURE_CLIENT_CERTIFICATE_PASSWORD" + azureClientCertificatePath = "AZURE_CLIENT_CERTIFICATE_PATH" + azureClientID = "AZURE_CLIENT_ID" + azureClientSecret = "AZURE_CLIENT_SECRET" + azureFederatedTokenFile = "AZURE_FEDERATED_TOKEN_FILE" + azurePassword = "AZURE_PASSWORD" + azureRegionalAuthorityName = "AZURE_REGIONAL_AUTHORITY_NAME" + azureTenantID = "AZURE_TENANT_ID" + azureUsername = "AZURE_USERNAME" + + organizationsTenantID = "organizations" + developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" + defaultSuffix = "/.default" + tenantIDValidationErr = "invalid tenantID.
You can locate your tenantID by following the instructions listed here: https://docs.microsoft.com/partner-center/find-ids-and-domain-names" +) + +var ( + // capability CP1 indicates the client application is capable of handling CAE claims challenges + cp1 = []string{"CP1"} + // CP1 is disabled until CAE support is added back + disableCP1 = true +) + +var getConfidentialClient = func(clientID, tenantID string, cred confidential.Credential, co *azcore.ClientOptions, additionalOpts ...confidential.Option) (confidentialClient, error) { + if !validTenantID(tenantID) { + return confidential.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return confidential.Client{}, err + } + authority := runtime.JoinPaths(authorityHost, tenantID) + o := []confidential.Option{ + confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), + confidential.WithHTTPClient(newPipelineAdapter(co)), + } + if !disableCP1 { + o = append(o, confidential.WithClientCapabilities(cp1)) + } + o = append(o, additionalOpts...) + if strings.ToLower(tenantID) == "adfs" { + o = append(o, confidential.WithInstanceDiscovery(false)) + } + return confidential.New(authority, clientID, cred, o...) +} + +var getPublicClient = func(clientID, tenantID string, co *azcore.ClientOptions, additionalOpts ...public.Option) (public.Client, error) { + if !validTenantID(tenantID) { + return public.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return public.Client{}, err + } + o := []public.Option{ + public.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), + public.WithHTTPClient(newPipelineAdapter(co)), + } + if !disableCP1 { + o = append(o, public.WithClientCapabilities(cp1)) + } + o = append(o, additionalOpts...) + if strings.ToLower(tenantID) == "adfs" { + o = append(o, public.WithInstanceDiscovery(false)) + } + return public.New(clientID, o...) +} + +// setAuthorityHost initializes the authority host for credentials. Precedence is: +// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user +// 2. value of AZURE_AUTHORITY_HOST +// 3. 
default: Azure Public Cloud +func setAuthorityHost(cc cloud.Configuration) (string, error) { + host := cc.ActiveDirectoryAuthorityHost + if host == "" { + if len(cc.Services) > 0 { + return "", errors.New("missing ActiveDirectoryAuthorityHost for specified cloud") + } + host = cloud.AzurePublic.ActiveDirectoryAuthorityHost + if envAuthorityHost := os.Getenv(azureAuthorityHost); envAuthorityHost != "" { + host = envAuthorityHost + } + } + u, err := url.Parse(host) + if err != nil { + return "", err + } + if u.Scheme != "https" { + return "", errors.New("cannot use an authority host without https") + } + return host, nil +} + +// validTenantID returns true if it receives a valid tenantID, false otherwise +func validTenantID(tenantID string) bool { + match, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID) + if err != nil { + return false + } + return match +} + +func newPipelineAdapter(opts *azcore.ClientOptions) pipelineAdapter { + pl := runtime.NewPipeline(component, version, runtime.PipelineOptions{}, opts) + return pipelineAdapter{pl: pl} +} + +type pipelineAdapter struct { + pl runtime.Pipeline +} + +func (p pipelineAdapter) CloseIdleConnections() { + // do nothing +} + +func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { + req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String()) + if err != nil { + return nil, err + } + if r.Body != nil && r.Body != http.NoBody { + // create a rewindable body from the existing body as required + var body io.ReadSeekCloser + if rsc, ok := r.Body.(io.ReadSeekCloser); ok { + body = rsc + } else { + b, err := io.ReadAll(r.Body) + if err != nil { + return nil, err + } + body = streaming.NopCloser(bytes.NewReader(b)) + } + err = req.SetBody(body, r.Header.Get("Content-Type")) + if err != nil { + return nil, err + } + } + resp, err := p.pl.Do(req) + if err != nil { + return nil, err + } + return resp, err +} + +// enables fakes for test scenarios +type confidentialClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...confidential.AcquireSilentOption) (confidential.AuthResult, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...confidential.AcquireByAuthCodeOption) (confidential.AuthResult, error) + AcquireTokenByCredential(ctx context.Context, scopes []string, options ...confidential.AcquireByCredentialOption) (confidential.AuthResult, error) + AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, options ...confidential.AcquireOnBehalfOfOption) (confidential.AuthResult, error) +} + +// enables fakes for test scenarios +type publicClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error) + AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string, options ...public.AcquireByUsernamePasswordOption) (public.AuthResult, error) + AcquireTokenByDeviceCode(ctx context.Context, scopes []string, options ...public.AcquireByDeviceCodeOption) (public.DeviceCode, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...public.AcquireByAuthCodeOption) (public.AuthResult, error) + AcquireTokenInteractive(ctx context.Context, scopes []string, options ...public.AcquireInteractiveOption) (public.AuthResult, error) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go new file mode 100644 index 00000000..33ff13c0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -0,0 +1,180 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "regexp" + "runtime" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +const ( + credNameAzureCLI = "AzureCLICredential" + timeoutCLIRequest = 10 * time.Second +) + +// used by tests to fake invoking the CLI +type azureCLITokenProvider func(ctx context.Context, resource string, tenantID string) ([]byte, error) + +// AzureCLICredentialOptions contains optional parameters for AzureCLICredential. +type AzureCLICredentialOptions struct { + // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition + // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the + // logged in account can access. + AdditionallyAllowedTenants []string + // TenantID identifies the tenant the credential should authenticate in. + // Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user. + TenantID string + + tokenProvider azureCLITokenProvider +} + +// init returns an instance of AzureCLICredentialOptions initialized with default values. +func (o *AzureCLICredentialOptions) init() { + if o.tokenProvider == nil { + o.tokenProvider = defaultTokenProvider() + } +} + +// AzureCLICredential authenticates as the identity logged in to the Azure CLI. +type AzureCLICredential struct { + s *syncer + tokenProvider azureCLITokenProvider +} + +// NewAzureCLICredential constructs an AzureCLICredential. Pass nil to accept default options. +func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredential, error) { + cp := AzureCLICredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c := AzureCLICredential{tokenProvider: cp.tokenProvider} + c.s = newSyncer(credNameAzureCLI, cp.TenantID, cp.AdditionallyAllowedTenants, c.requestToken, c.requestToken) + return &c, nil +} + +// GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI. +// This method is called automatically by Azure SDK clients. 
+func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) != 1 { + return azcore.AccessToken{}, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + } + // CLI expects an AAD v1 resource, not a v2 scope + opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} + return c.s.GetToken(ctx, opts) +} + +func (c *AzureCLICredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + b, err := c.tokenProvider(ctx, opts.Scopes[0], opts.TenantID) + if err != nil { + return azcore.AccessToken{}, err + } + at, err := c.createAccessToken(b) + if err != nil { + return azcore.AccessToken{}, err + } + return at, nil +} + +func defaultTokenProvider() func(ctx context.Context, resource string, tenantID string) ([]byte, error) { + return func(ctx context.Context, resource string, tenantID string) ([]byte, error) { + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + if err != nil { + return nil, err + } + if !match { + return nil, fmt.Errorf(`%s: unexpected scope "%s". Only alphanumeric characters and ".", ":", "-", and "/" are allowed`, credNameAzureCLI, resource) + } + + // set a default timeout for this authentication iff the application hasn't done so already + var cancel context.CancelFunc + if _, hasDeadline := ctx.Deadline(); !hasDeadline { + ctx, cancel = context.WithTimeout(ctx, timeoutCLIRequest) + defer cancel() + } + + commandLine := "az account get-access-token -o json --resource " + resource + if tenantID != "" { + commandLine += " --tenant " + tenantID + } + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value") + } + cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) + cliCmd.Dir = dir + } else { + cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) + cliCmd.Dir = "/bin" + } + cliCmd.Env = os.Environ() + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") { + msg = "Azure CLI not found on path" + } + if msg == "" { + msg = err.Error() + } + return nil, newCredentialUnavailableError(credNameAzureCLI, msg) + } + + return output, nil + } +} + +func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { + t := struct { + AccessToken string `json:"accessToken"` + Authority string `json:"_authority"` + ClientID string `json:"_clientId"` + ExpiresOn string `json:"expiresOn"` + IdentityProvider string `json:"identityProvider"` + IsMRRT bool `json:"isMRRT"` + RefreshToken string `json:"refreshToken"` + Resource string `json:"resource"` + TokenType string `json:"tokenType"` + UserID string `json:"userId"` + }{} + err := json.Unmarshal(tk, &t) + if err != nil { + return azcore.AccessToken{}, err + } + + // the Azure CLI's "expiresOn" is local time + exp, err := time.ParseInLocation("2006-01-02 15:04:05.999999", t.ExpiresOn, time.Local) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("error parsing token expiration time %q: %v", t.ExpiresOn, err) + } + + converted := azcore.AccessToken{ + Token: t.AccessToken, + ExpiresOn: exp.UTC(), + } + return converted, nil +} + +var _
azcore.TokenCredential = (*AzureCLICredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go new file mode 100644 index 00000000..dc855edf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -0,0 +1,138 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// ChainedTokenCredentialOptions contains optional parameters for ChainedTokenCredential. +type ChainedTokenCredentialOptions struct { + // RetrySources configures how the credential uses its sources. When true, the credential always attempts to + // authenticate through each source in turn, stopping when one succeeds. When false, the credential authenticates + // only through the first successful source--it never again tries the sources which failed. + RetrySources bool +} + +// ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default, +// it tries all the credentials until one authenticates, after which it always uses that credential. +type ChainedTokenCredential struct { + cond *sync.Cond + iterating bool + name string + retrySources bool + sources []azcore.TokenCredential + successfulCredential azcore.TokenCredential +} + +// NewChainedTokenCredential creates a ChainedTokenCredential. Pass nil for options to accept defaults. +func NewChainedTokenCredential(sources []azcore.TokenCredential, options *ChainedTokenCredentialOptions) (*ChainedTokenCredential, error) { + if len(sources) == 0 { + return nil, errors.New("sources must contain at least one TokenCredential") + } + for _, source := range sources { + if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil + return nil, errors.New("sources cannot contain nil") + } + } + cp := make([]azcore.TokenCredential, len(sources)) + copy(cp, sources) + if options == nil { + options = &ChainedTokenCredentialOptions{} + } + return &ChainedTokenCredential{ + cond: sync.NewCond(&sync.Mutex{}), + name: "ChainedTokenCredential", + retrySources: options.RetrySources, + sources: cp, + }, nil +} + +// GetToken calls GetToken on the chained credentials in turn, stopping when one returns a token. +// This method is called automatically by Azure SDK clients.
+func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if !c.retrySources { + // ensure only one goroutine at a time iterates the sources and perhaps sets c.successfulCredential + c.cond.L.Lock() + for { + if c.successfulCredential != nil { + c.cond.L.Unlock() + return c.successfulCredential.GetToken(ctx, opts) + } + if !c.iterating { + c.iterating = true + // allow other goroutines to wait while this one iterates + c.cond.L.Unlock() + break + } + c.cond.Wait() + } + } + + var ( + err error + errs []error + successfulCredential azcore.TokenCredential + token azcore.AccessToken + unavailableErr *credentialUnavailableError + ) + for _, cred := range c.sources { + token, err = cred.GetToken(ctx, opts) + if err == nil { + log.Writef(EventAuthentication, "%s authenticated with %s", c.name, extractCredentialName(cred)) + successfulCredential = cred + break + } + errs = append(errs, err) + // continue to the next source iff this one returned credentialUnavailableError + if !errors.As(err, &unavailableErr) { + break + } + } + if c.iterating { + c.cond.L.Lock() + // this is nil when all credentials returned an error + c.successfulCredential = successfulCredential + c.iterating = false + c.cond.L.Unlock() + c.cond.Broadcast() + } + // err is the error returned by the last GetToken call. It will be nil when that call succeeds + if err != nil { + // return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise + msg := createChainedErrorMessage(errs) + if errors.As(err, &unavailableErr) { + err = newCredentialUnavailableError(c.name, msg) + } else { + res := getResponseFromError(err) + err = newAuthenticationFailedError(c.name, msg, res, err) + } + } + return token, err +} + +func createChainedErrorMessage(errs []error) string { + msg := "failed to acquire a token.\nAttempted credentials:" + for _, err := range errs { + msg += fmt.Sprintf("\n\t%s", err.Error()) + } + return msg +} + +func extractCredentialName(credential azcore.TokenCredential) string { + return strings.TrimPrefix(fmt.Sprintf("%T", credential), "*azidentity.") +} + +var _ azcore.TokenCredential = (*ChainedTokenCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml new file mode 100644 index 00000000..3b443e8e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -0,0 +1,47 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. 
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + RunLiveTests: true + ServiceDirectory: 'azidentity' + PreSteps: + - pwsh: | + [System.Convert]::FromBase64String($env:PFX_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/test.pfx -AsByteStream + Set-Content -Path $(Agent.TempDirectory)/test.pem -Value $env:PEM_CONTENTS + [System.Convert]::FromBase64String($env:SNI_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/testsni.pfx -AsByteStream + env: + PFX_CONTENTS: $(net-identity-spcert-pfx) + PEM_CONTENTS: $(net-identity-spcert-pem) + SNI_CONTENTS: $(net-identity-spcert-sni) + EnvVars: + AZURE_IDENTITY_TEST_TENANTID: $(net-identity-tenantid) + AZURE_IDENTITY_TEST_USERNAME: $(net-identity-username) + AZURE_IDENTITY_TEST_PASSWORD: $(net-identity-password) + IDENTITY_SP_TENANT_ID: $(net-identity-sp-tenantid) + IDENTITY_SP_CLIENT_ID: $(net-identity-sp-clientid) + IDENTITY_SP_CLIENT_SECRET: $(net-identity-sp-clientsecret) + IDENTITY_SP_CERT_PEM: $(Agent.TempDirectory)/test.pem + IDENTITY_SP_CERT_PFX: $(Agent.TempDirectory)/test.pfx + IDENTITY_SP_CERT_SNI: $(Agent.TempDirectory)/testsni.pfx diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go new file mode 100644 index 00000000..d9d22996 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -0,0 +1,83 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameAssertion = "ClientAssertionCredential" + +// ClientAssertionCredential authenticates an application with assertions provided by a callback function. +// This credential is for advanced scenarios. [ClientCertificateCredential] has a more convenient API for +// the most common assertion scenario, authenticating a service principal with a certificate. See +// [Azure AD documentation] for details of the assertion format. +// +// [Azure AD documentation]: https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format +type ClientAssertionCredential struct { + client confidentialClient + s *syncer +} + +// ClientAssertionCredentialOptions contains optional parameters for ClientAssertionCredential. +type ClientAssertionCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. 
Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. +func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(context.Context) (string, error), options *ClientAssertionCredentialOptions) (*ClientAssertionCredential, error) { + if getAssertion == nil { + return nil, errors.New("getAssertion must be a function that returns assertions") + } + if options == nil { + options = &ClientAssertionCredentialOptions{} + } + cred := confidential.NewCredFromAssertionCallback( + func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) { + return getAssertion(ctx) + }, + ) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + if err != nil { + return nil, err + } + cac := ClientAssertionCredential{client: c} + cac.s = newSyncer(credNameAssertion, tenantID, options.AdditionallyAllowedTenants, cac.requestToken, cac.silentAuth) + return &cac, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *ClientAssertionCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ClientAssertionCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go new file mode 100644 index 00000000..804eba89 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -0,0 +1,172 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "golang.org/x/crypto/pkcs12" +) + +const credNameCert = "ClientCertificateCredential" + +// ClientCertificateCredentialOptions contains optional parameters for ClientCertificateCredential. +type ClientCertificateCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. 
+ // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c + // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. + // Defaults to False. + SendCertificateChain bool +} + +// ClientCertificateCredential authenticates a service principal with a certificate. +type ClientCertificateCredential struct { + client confidentialClient + s *syncer +} + +// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. +func NewClientCertificateCredential(tenantID string, clientID string, certs []*x509.Certificate, key crypto.PrivateKey, options *ClientCertificateCredentialOptions) (*ClientCertificateCredential, error) { + if len(certs) == 0 { + return nil, errors.New("at least one certificate is required") + } + if options == nil { + options = &ClientCertificateCredentialOptions{} + } + cred, err := confidential.NewCredFromCert(certs, key) + if err != nil { + return nil, err + } + var o []confidential.Option + if options.SendCertificateChain { + o = append(o, confidential.WithX5C()) + } + o = append(o, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, o...) + if err != nil { + return nil, err + } + cc := ClientCertificateCredential{client: c} + cc.s = newSyncer(credNameCert, tenantID, options.AdditionallyAllowedTenants, cc.requestToken, cc.silentAuth) + return &cc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *ClientCertificateCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ClientCertificateCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +// ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential. +// Pass nil for password if the private key isn't encrypted. This function can't decrypt keys in PEM format. 
+func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, crypto.PrivateKey, error) { + var blocks []*pem.Block + var err error + if len(password) == 0 { + blocks, err = loadPEMCert(certData) + } + if len(blocks) == 0 || err != nil { + blocks, err = loadPKCS12Cert(certData, string(password)) + } + if err != nil { + return nil, nil, err + } + var certs []*x509.Certificate + var pk crypto.PrivateKey + for _, block := range blocks { + switch block.Type { + case "CERTIFICATE": + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, err + } + certs = append(certs, c) + case "PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + } + if err != nil { + return nil, nil, err + } + case "RSA PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, err + } + } + } + if len(certs) == 0 { + return nil, nil, errors.New("found no certificate") + } + if pk == nil { + return nil, nil, errors.New("found no private key") + } + return certs, pk, nil +} + +func loadPEMCert(certData []byte) ([]*pem.Block, error) { + blocks := []*pem.Block{} + for { + var block *pem.Block + block, certData = pem.Decode(certData) + if block == nil { + break + } + blocks = append(blocks, block) + } + if len(blocks) == 0 { + return nil, errors.New("didn't find any PEM blocks") + } + return blocks, nil +} + +func loadPKCS12Cert(certData []byte, password string) ([]*pem.Block, error) { + blocks, err := pkcs12.ToPEM(certData, password) + if err != nil { + return nil, err + } + if len(blocks) == 0 { + // not mentioning PKCS12 in this message because we end up here when certData is garbage + return nil, errors.New("didn't find any certificate content") + } + return blocks, err +} + +var _ azcore.TokenCredential = (*ClientCertificateCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go new file mode 100644 index 00000000..dda21f6b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameSecret = "ClientSecretCredential" + +// ClientSecretCredentialOptions contains optional parameters for ClientSecretCredential. +type ClientSecretCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. 
It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// ClientSecretCredential authenticates an application with a client secret. +type ClientSecretCredential struct { + client confidentialClient + s *syncer +} + +// NewClientSecretCredential constructs a ClientSecretCredential. Pass nil for options to accept defaults. +func NewClientSecretCredential(tenantID string, clientID string, clientSecret string, options *ClientSecretCredentialOptions) (*ClientSecretCredential, error) { + if options == nil { + options = &ClientSecretCredentialOptions{} + } + cred, err := confidential.NewCredFromSecret(clientSecret) + if err != nil { + return nil, err + } + c, err := getConfidentialClient( + clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery), + ) + if err != nil { + return nil, err + } + csc := ClientSecretCredential{client: c} + csc.s = newSyncer(credNameSecret, tenantID, options.AdditionallyAllowedTenants, csc.requestToken, csc.silentAuth) + return &csc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *ClientSecretCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ClientSecretCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ClientSecretCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go new file mode 100644 index 00000000..1e3efdc9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -0,0 +1,209 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "os" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential. +// These options may not apply to all credentials in the chain. +type DefaultAzureCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add + // the wildcard value "*" to allow the credential to acquire tokens for any tenant. 
This value can also be + // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // TenantID identifies the tenant the Azure CLI should authenticate in. + // Defaults to the CLI's default tenant, which is typically the home tenant of the user logged in to the CLI. + TenantID string +} + +// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure. +// It combines credentials suitable for deployment with credentials suitable for local development. +// It attempts to authenticate with each of these credential types, in the following order, stopping +// when one provides a token: +// +// - [EnvironmentCredential] +// - [WorkloadIdentityCredential], if environment variable configuration is set by the Azure workload +// identity webhook. Use [WorkloadIdentityCredential] directly when not using the webhook or needing +// more control over its configuration. +// - [ManagedIdentityCredential] +// - [AzureCLICredential] +// +// Consult the documentation for these credential types for more information on how they authenticate. +// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for +// every subsequent authentication. +type DefaultAzureCredential struct { + chain *ChainedTokenCredential +} + +// NewDefaultAzureCredential creates a DefaultAzureCredential. Pass nil for options to accept defaults. 
+func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*DefaultAzureCredential, error) { + var creds []azcore.TokenCredential + var errorMessages []string + + if options == nil { + options = &DefaultAzureCredentialOptions{} + } + additionalTenants := options.AdditionallyAllowedTenants + if len(additionalTenants) == 0 { + if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" { + additionalTenants = strings.Split(tenants, ";") + } + } + + envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{ + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + additionallyAllowedTenants: additionalTenants, + }) + if err == nil { + creds = append(creds, envCred) + } else { + errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err}) + } + + // workload identity requires values for AZURE_AUTHORITY_HOST, AZURE_CLIENT_ID, AZURE_FEDERATED_TOKEN_FILE, AZURE_TENANT_ID + wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + }) + if err == nil { + creds = append(creds, wic) + } else { + errorMessages = append(errorMessages, credNameWorkloadIdentity+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err}) + } + o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions} + if ID, ok := os.LookupEnv(azureClientID); ok { + o.ID = ClientID(ID) + } + miCred, err := NewManagedIdentityCredential(o) + if err == nil { + creds = append(creds, &timeoutWrapper{mic: miCred, timeout: time.Second}) + } else { + errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err}) + } + + cliCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{AdditionallyAllowedTenants: additionalTenants, TenantID: options.TenantID}) + if err == nil { + creds = append(creds, cliCred) + } else { + errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err}) + } + + err = defaultAzureCredentialConstructorErrorHandler(len(creds), errorMessages) + if err != nil { + return nil, err + } + + chain, err := NewChainedTokenCredential(creds, nil) + if err != nil { + return nil, err + } + chain.name = "DefaultAzureCredential" + return &DefaultAzureCredential{chain: chain}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
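// A minimal usage sketch for the chain constructed above: build the credential once and hand
// it to SDK clients, which call GetToken for you. The scope is a hypothetical placeholder;
// the direct GetToken call only illustrates what a client does internally.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	// Normally an Azure SDK client makes this call; shown here only for illustration.
	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("authenticated; token expires", tk.ExpiresOn)
}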
+func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.chain.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*DefaultAzureCredential)(nil) + +func defaultAzureCredentialConstructorErrorHandler(numberOfSuccessfulCredentials int, errorMessages []string) (err error) { + errorMessage := strings.Join(errorMessages, "\n\t") + + if numberOfSuccessfulCredentials == 0 { + return errors.New(errorMessage) + } + + if len(errorMessages) != 0 { + log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", errorMessage) + } + + return nil +} + +// defaultCredentialErrorReporter is a substitute for credentials that couldn't be constructed. +// Its GetToken method always returns a credentialUnavailableError having the same message as +// the error that prevented constructing the credential. This ensures the message is present +// in the error returned by ChainedTokenCredential.GetToken() +type defaultCredentialErrorReporter struct { + credType string + err error +} + +func (d *defaultCredentialErrorReporter) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if _, ok := d.err.(*credentialUnavailableError); ok { + return azcore.AccessToken{}, d.err + } + return azcore.AccessToken{}, newCredentialUnavailableError(d.credType, d.err.Error()) +} + +var _ azcore.TokenCredential = (*defaultCredentialErrorReporter)(nil) + +// timeoutWrapper prevents a potentially very long timeout when managed identity isn't available +type timeoutWrapper struct { + mic *ManagedIdentityCredential + // timeout applies to all auth attempts until one doesn't time out + timeout time.Duration +} + +// GetToken wraps DefaultAzureCredential's initial managed identity auth attempt with a short timeout +// because managed identity may not be available and connecting to IMDS can take several minutes to time out. +func (w *timeoutWrapper) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var tk azcore.AccessToken + var err error + // no need to synchronize around this value because it's written only within ChainedTokenCredential's critical section + if w.timeout > 0 { + c, cancel := context.WithTimeout(ctx, w.timeout) + defer cancel() + tk, err = w.mic.GetToken(c, opts) + if isAuthFailedDueToContext(err) { + err = newCredentialUnavailableError(credNameManagedIdentity, "managed identity timed out") + } else { + // some managed identity implementation is available, so don't apply the timeout to future calls + w.timeout = 0 + } + } else { + tk, err = w.mic.GetToken(ctx, opts) + } + return tk, err +} + +// unwraps nested AuthenticationFailedErrors to get the root error +func isAuthFailedDueToContext(err error) bool { + for { + var authFailedErr *AuthenticationFailedError + if !errors.As(err, &authFailedErr) { + break + } + err = authFailedErr.err + } + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go new file mode 100644 index 00000000..108e83c4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -0,0 +1,136 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameDeviceCode = "DeviceCodeCredential" + +// DeviceCodeCredentialOptions contains optional parameters for DeviceCodeCredential. +type DeviceCodeCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire + // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + AdditionallyAllowedTenants []string + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant + // applications. + TenantID string + + // UserPrompt controls how the credential presents authentication instructions. The credential calls + // this function with authentication details when it receives a device code. By default, the credential + // prints these details to stdout. + UserPrompt func(context.Context, DeviceCodeMessage) error +} + +func (o *DeviceCodeCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } + if o.UserPrompt == nil { + o.UserPrompt = func(ctx context.Context, dc DeviceCodeMessage) error { + fmt.Println(dc.Message) + return nil + } + } +} + +// DeviceCodeMessage contains the information a user needs to complete authentication. +type DeviceCodeMessage struct { + // UserCode is the user code returned by the service. + UserCode string `json:"user_code"` + // VerificationURL is the URL at which the user must authenticate. + VerificationURL string `json:"verification_uri"` + // Message is user instruction from Azure Active Directory. + Message string `json:"message"` +} + +// DeviceCodeCredential acquires tokens for a user via the device code flow, which has the +// user browse to an Azure Active Directory URL, enter a code, and authenticate. It's useful +// for authenticating a user in an environment without a web browser, such as an SSH session. +// If a web browser is available, InteractiveBrowserCredential is more convenient because it +// automatically opens a browser to the login page. +type DeviceCodeCredential struct { + account public.Account + client publicClient + s *syncer + prompt func(context.Context, DeviceCodeMessage) error +} + +// NewDeviceCodeCredential creates a DeviceCodeCredential. Pass nil to accept default options. 
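// A minimal sketch of overriding UserPrompt so the device code is routed through the
// application's own logging instead of stdout. The zero-valued TenantID and ClientID fall
// back to the defaults set by init() above; the scope is a hypothetical placeholder.

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
		UserPrompt: func(ctx context.Context, dc azidentity.DeviceCodeMessage) error {
			// Surface the sign-in instructions through the app's logger.
			log.Printf("to sign in, visit %s and enter code %s", dc.VerificationURL, dc.UserCode)
			return nil
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// GetToken blocks, polling until the user completes authentication.
	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://graph.microsoft.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("signed in; token expires", tk.ExpiresOn)
}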
+func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeCredential, error) { + cp := DeviceCodeCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c, err := getPublicClient( + cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery), + ) + if err != nil { + return nil, err + } + cred := DeviceCodeCredential{client: c, prompt: cp.UserPrompt} + cred.s = newSyncer(credNameDeviceCode, cp.TenantID, cp.AdditionallyAllowedTenants, cred.requestToken, cred.silentAuth) + return &cred, nil +} + +// GetToken requests an access token from Azure Active Directory. It will begin the device code flow and poll until the user completes authentication. +// This method is called automatically by Azure SDK clients. +func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *DeviceCodeCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + dc, err := c.client.AcquireTokenByDeviceCode(ctx, opts.Scopes, public.WithTenantID(opts.TenantID)) + if err != nil { + return azcore.AccessToken{}, err + } + err = c.prompt(ctx, DeviceCodeMessage{ + Message: dc.Result.Message, + UserCode: dc.Result.UserCode, + VerificationURL: dc.Result.VerificationURL, + }) + if err != nil { + return azcore.AccessToken{}, err + } + ar, err := dc.AuthenticationResult(ctx) + if err != nil { + return azcore.AccessToken{}, err + } + c.account = ar.Account + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *DeviceCodeCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, + public.WithSilentAccount(c.account), + public.WithTenantID(opts.TenantID), + ) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go new file mode 100644 index 00000000..7ecd928e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -0,0 +1,164 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN" + +// EnvironmentCredentialOptions contains optional parameters for EnvironmentCredential +type EnvironmentCredentialOptions struct { + azcore.ClientOptions + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. 
+ DisableInstanceDiscovery bool + // additionallyAllowedTenants is used only by NewDefaultAzureCredential() to enable that constructor's explicit + // option to override the value of AZURE_ADDITIONALLY_ALLOWED_TENANTS. Applications using EnvironmentCredential + // directly should set that variable instead. This field should remain unexported to preserve this credential's + // unambiguous "all configuration from environment variables" design. + additionallyAllowedTenants []string +} + +// EnvironmentCredential authenticates a service principal with a secret or certificate, or a user with a password, depending +// on environment variable configuration. It reads configuration from these variables, in the following order: +// +// # Service principal with client secret +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. +// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_SECRET: one of the service principal's client secrets +// +// # Service principal with certificate +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. +// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 certificate file including the private key. +// +// AZURE_CLIENT_CERTIFICATE_PASSWORD: (optional) password for the certificate file. +// +// # User with username and password +// +// AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations". +// +// AZURE_CLIENT_ID: client ID of the application the user will authenticate to +// +// AZURE_USERNAME: a username (usually an email address) +// +// AZURE_PASSWORD: the user's password +// +// # Configuration for multitenant applications +// +// To enable multitenant authentication, set AZURE_ADDITIONALLY_ALLOWED_TENANTS with a semicolon delimited list of tenants +// the credential may request tokens from in addition to the tenant specified by AZURE_TENANT_ID. Set +// AZURE_ADDITIONALLY_ALLOWED_TENANTS to "*" to enable the credential to request a token from any tenant. +type EnvironmentCredential struct { + cred azcore.TokenCredential +} + +// NewEnvironmentCredential creates an EnvironmentCredential. Pass nil to accept default options. 
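// A configuration sketch for the variable combinations documented above. In practice the
// deployment environment sets these variables; os.Setenv here only illustrates which
// combination selects the client-secret flow. All values are placeholders.

package main

import (
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	os.Setenv("AZURE_TENANT_ID", "tenant-id")
	os.Setenv("AZURE_CLIENT_ID", "client-id")
	os.Setenv("AZURE_CLIENT_SECRET", "client-secret")

	// With the three variables above set, the constructor selects ClientSecretCredential.
	cred, err := azidentity.NewEnvironmentCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = cred // hand to an SDK client
}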
+func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*EnvironmentCredential, error) { + if options == nil { + options = &EnvironmentCredentialOptions{} + } + tenantID := os.Getenv(azureTenantID) + if tenantID == "" { + return nil, errors.New("missing environment variable AZURE_TENANT_ID") + } + clientID := os.Getenv(azureClientID) + if clientID == "" { + return nil, errors.New("missing environment variable " + azureClientID) + } + // tenants set by NewDefaultAzureCredential() override the value of AZURE_ADDITIONALLY_ALLOWED_TENANTS + additionalTenants := options.additionallyAllowedTenants + if len(additionalTenants) == 0 { + if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" { + additionalTenants = strings.Split(tenants, ";") + } + } + if clientSecret := os.Getenv(azureClientSecret); clientSecret != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientSecretCredential") + o := &ClientSecretCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewClientSecretCredential(tenantID, clientID, clientSecret, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if certPath := os.Getenv(azureClientCertificatePath); certPath != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientCertificateCredential") + certData, err := os.ReadFile(certPath) + if err != nil { + return nil, fmt.Errorf(`failed to read certificate file "%s": %v`, certPath, err) + } + var password []byte + if v := os.Getenv(azureClientCertificatePassword); v != "" { + password = []byte(v) + } + certs, key, err := ParseCertificates(certData, password) + if err != nil { + return nil, fmt.Errorf(`failed to load certificate from "%s": %v`, certPath, err) + } + o := &ClientCertificateCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + if v, ok := os.LookupEnv(envVarSendCertChain); ok { + o.SendCertificateChain = v == "1" || strings.ToLower(v) == "true" + } + cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if username := os.Getenv(azureUsername); username != "" { + if password := os.Getenv(azurePassword); password != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with UsernamePasswordCredential") + o := &UsernamePasswordCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewUsernamePasswordCredential(tenantID, clientID, username, password, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + return nil, errors.New("no value for AZURE_PASSWORD") + } + return nil, errors.New("incomplete environment variable configuration. Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set") +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
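// The constructor above dispatches to ClientSecretCredential when AZURE_CLIENT_SECRET is set.
// A minimal sketch of the equivalent direct construction, with placeholder values, for
// applications that prefer explicit configuration over environment variables:

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewClientSecretCredential("tenant-id", "client-id", "client-secret", nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = cred // hand to an SDK client
}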
+func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.cred.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*EnvironmentCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go new file mode 100644 index 00000000..86d8976a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -0,0 +1,129 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" +) + +// getResponseFromError retrieves the response carried by +// an AuthenticationFailedError or MSAL CallErr, if any +func getResponseFromError(err error) *http.Response { + var a *AuthenticationFailedError + var c msal.CallErr + var res *http.Response + if errors.As(err, &c) { + res = c.Resp + } else if errors.As(err, &a) { + res = a.RawResponse + } + return res +} + +// AuthenticationFailedError indicates an authentication request has failed. +type AuthenticationFailedError struct { + // RawResponse is the HTTP response motivating the error, if available. + RawResponse *http.Response + + credType string + message string + err error +} + +func newAuthenticationFailedError(credType string, message string, resp *http.Response, err error) error { + return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp, err: err} +} + +// Error implements the error interface. Note that the message contents are not contractual and can change over time. 
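// An error-handling sketch for the AuthenticationFailedError type defined below: callers can
// unwrap it with errors.As and inspect RawResponse when it's populated. The IDs and scope are
// hypothetical placeholders; the bad secret simply provokes a failed token request.

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewClientSecretCredential("tenant-id", "client-id", "bad-secret", nil)
	if err != nil {
		panic(err)
	}
	_, err = cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	var authErr *azidentity.AuthenticationFailedError
	if errors.As(err, &authErr) && authErr.RawResponse != nil {
		fmt.Println("authentication failed with HTTP status", authErr.RawResponse.StatusCode)
	}
}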
+func (e *AuthenticationFailedError) Error() string { + if e.RawResponse == nil { + return e.credType + ": " + e.message + } + msg := &bytes.Buffer{} + fmt.Fprintf(msg, e.credType+" authentication failed\n") + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := io.ReadAll(e.RawResponse.Body) + e.RawResponse.Body.Close() + if err != nil { + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + e.RawResponse.Body = io.NopCloser(bytes.NewReader(body)) + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + } else { + fmt.Fprint(msg, "Response contained no body") + } + fmt.Fprintln(msg, "\n--------------------------------------------------------------------------------") + var anchor string + switch e.credType { + case credNameAzureCLI: + anchor = "azure-cli" + case credNameCert: + anchor = "client-cert" + case credNameSecret: + anchor = "client-secret" + case credNameManagedIdentity: + anchor = "managed-id" + case credNameUserPassword: + anchor = "username-password" + case credNameWorkloadIdentity: + anchor = "workload" + } + if anchor != "" { + fmt.Fprintf(msg, "To troubleshoot, visit https://aka.ms/azsdk/go/identity/troubleshoot#%s", anchor) + } + return msg.String() +} + +// NonRetriable indicates the request which provoked this error shouldn't be retried. +func (*AuthenticationFailedError) NonRetriable() { + // marker method +} + +var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil) + +// credentialUnavailableError indicates a credential can't attempt authentication because it lacks required +// data or state +type credentialUnavailableError struct { + message string +} + +// newCredentialUnavailableError is an internal helper that ensures consistent error message formatting +func newCredentialUnavailableError(credType, message string) error { + msg := fmt.Sprintf("%s: %s", credType, message) + return &credentialUnavailableError{msg} +} + +// NewCredentialUnavailableError constructs an error indicating a credential can't attempt authentication +// because it lacks required data or state. When [ChainedTokenCredential] receives this error it will try +// its next credential, if any. +func NewCredentialUnavailableError(message string) error { + return &credentialUnavailableError{message} +} + +// Error implements the error interface. Note that the message contents are not contractual and can change over time. +func (e *credentialUnavailableError) Error() string { + return e.message +} + +// NonRetriable is a marker method indicating this error should not be retried. It has no implementation. 
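// A chaining sketch for NewCredentialUnavailableError: a custom credential signals "skip me"
// so ChainedTokenCredential falls through to the next entry. fileCred, its token file path,
// and the one-hour expiry are hypothetical; only the error contract comes from this file.

package main

import (
	"context"
	"log"
	"os"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

type fileCred struct{ path string }

func (f fileCred) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
	b, err := os.ReadFile(f.path)
	if err != nil {
		// Signal "can't attempt authentication" rather than a hard failure,
		// so a chain tries its next credential.
		return azcore.AccessToken{}, azidentity.NewCredentialUnavailableError("fileCred: no token file present")
	}
	return azcore.AccessToken{Token: string(b), ExpiresOn: time.Now().Add(time.Hour)}, nil
}

func main() {
	mi, err := azidentity.NewManagedIdentityCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	chain, err := azidentity.NewChainedTokenCredential(
		[]azcore.TokenCredential{fileCred{path: "/var/run/token"}, mi}, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = chain // hand to an SDK client
}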
+func (e *credentialUnavailableError) NonRetriable() {} + +var _ errorinfo.NonRetriable = (*credentialUnavailableError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go new file mode 100644 index 00000000..4868d22c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -0,0 +1,106 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameBrowser = "InteractiveBrowserCredential" + +// InteractiveBrowserCredentialOptions contains optional parameters for InteractiveBrowserCredential. +type InteractiveBrowserCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire + // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + AdditionallyAllowedTenants []string + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + + // LoginHint pre-populates the account prompt with a username. Users may choose to authenticate a different account. + LoginHint string + // RedirectURL is the URL Azure Active Directory will redirect to with the access token. This is required + // only when setting ClientID, and must match a redirect URI in the application's registration. + // Applications which have registered "http://localhost" as a redirect URI need not set this option. + RedirectURL string + + // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. + TenantID string +} + +func (o *InteractiveBrowserCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } +} + +// InteractiveBrowserCredential opens a browser to interactively authenticate a user. +type InteractiveBrowserCredential struct { + account public.Account + client publicClient + options InteractiveBrowserCredentialOptions + s *syncer +} + +// NewInteractiveBrowserCredential constructs a new InteractiveBrowserCredential. Pass nil to accept default options. 
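// A minimal sketch of the browser credential with a pre-filled account prompt. With the
// default client ID no RedirectURL is needed; the hint address is a placeholder.

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewInteractiveBrowserCredential(&azidentity.InteractiveBrowserCredentialOptions{
		LoginHint: "user@contoso.com",
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = cred // the browser opens on the first GetToken call
}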
+func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOptions) (*InteractiveBrowserCredential, error) { + cp := InteractiveBrowserCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery)) + if err != nil { + return nil, err + } + ibc := InteractiveBrowserCredential{client: c, options: cp} + ibc.s = newSyncer(credNameBrowser, cp.TenantID, cp.AdditionallyAllowedTenants, ibc.requestToken, ibc.silentAuth) + return &ibc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *InteractiveBrowserCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenInteractive(ctx, opts.Scopes, + public.WithLoginHint(c.options.LoginHint), + public.WithRedirectURI(c.options.RedirectURL), + public.WithTenantID(opts.TenantID), + ) + if err == nil { + c.account = ar.Account + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *InteractiveBrowserCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, + public.WithSilentAccount(c.account), + public.WithTenantID(opts.TenantID), + ) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go new file mode 100644 index 00000000..1aa1e0fc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go @@ -0,0 +1,14 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + +// EventAuthentication entries contain information about authentication. +// This includes information like the names of environment variables +// used when obtaining credentials and the type of credential used. +const EventAuthentication log.Event = "Authentication" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go new file mode 100644 index 00000000..d7b4a32a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -0,0 +1,388 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+
+package azidentity
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
+)
+
+const (
+	arcIMDSEndpoint          = "IMDS_ENDPOINT"
+	identityEndpoint         = "IDENTITY_ENDPOINT"
+	identityHeader           = "IDENTITY_HEADER"
+	identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT"
+	headerMetadata           = "Metadata"
+	imdsEndpoint             = "http://169.254.169.254/metadata/identity/oauth2/token"
+	msiEndpoint              = "MSI_ENDPOINT"
+	imdsAPIVersion           = "2018-02-01"
+	azureArcAPIVersion       = "2019-08-15"
+	serviceFabricAPIVersion  = "2019-07-01-preview"
+
+	qpClientID = "client_id"
+	qpResID    = "mi_res_id"
+)
+
+type msiType int
+
+const (
+	msiTypeAppService msiType = iota
+	msiTypeAzureArc
+	msiTypeCloudShell
+	msiTypeIMDS
+	msiTypeServiceFabric
+)
+
+// managedIdentityClient provides the base for authenticating in managed identity environments.
+// This type includes a runtime.Pipeline and TokenCredentialOptions.
+type managedIdentityClient struct {
+	pipeline runtime.Pipeline
+	msiType  msiType
+	endpoint string
+	id       ManagedIDKind
+}
+
+type wrappedNumber json.Number
+
+func (n *wrappedNumber) UnmarshalJSON(b []byte) error {
+	c := string(b)
+	if c == "\"\"" {
+		return nil
+	}
+	return json.Unmarshal(b, (*json.Number)(n))
+}
+
+// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS
+func setIMDSRetryOptionDefaults(o *policy.RetryOptions) {
+	if o.MaxRetries == 0 {
+		o.MaxRetries = 5
+	}
+	if o.MaxRetryDelay == 0 {
+		o.MaxRetryDelay = 1 * time.Minute
+	}
+	if o.RetryDelay == 0 {
+		o.RetryDelay = 2 * time.Second
+	}
+	if o.StatusCodes == nil {
+		o.StatusCodes = []int{
+			// IMDS docs recommend retrying 404, 429 and all 5xx
+			// https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling
+			http.StatusNotFound,                      // 404
+			http.StatusTooManyRequests,               // 429
+			http.StatusInternalServerError,           // 500
+			http.StatusNotImplemented,                // 501
+			http.StatusBadGateway,                    // 502
+			http.StatusGatewayTimeout,                // 504
+			http.StatusHTTPVersionNotSupported,       // 505
+			http.StatusVariantAlsoNegotiates,         // 506
+			http.StatusInsufficientStorage,           // 507
+			http.StatusLoopDetected,                  // 508
+			http.StatusNotExtended,                   // 510
+			http.StatusNetworkAuthenticationRequired, // 511
+		}
+	}
+	if o.TryTimeout == 0 {
+		o.TryTimeout = 1 * time.Minute
+	}
+}
+
+// newManagedIdentityClient creates a new instance of the ManagedIdentityClient with the ManagedIdentityCredentialOptions
+// that are passed into it along with a default pipeline.
+// options: ManagedIdentityCredentialOptions configure policies for the pipeline and the authority host that +// will be used to retrieve tokens and authenticate +func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*managedIdentityClient, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + cp := options.ClientOptions + c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS} + env := "IMDS" + if endpoint, ok := os.LookupEnv(identityEndpoint); ok { + if _, ok := os.LookupEnv(identityHeader); ok { + if _, ok := os.LookupEnv(identityServerThumbprint); ok { + env = "Service Fabric" + c.endpoint = endpoint + c.msiType = msiTypeServiceFabric + } else { + env = "App Service" + c.endpoint = endpoint + c.msiType = msiTypeAppService + } + } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok { + env = "Azure Arc" + c.endpoint = endpoint + c.msiType = msiTypeAzureArc + } + } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { + env = "Cloud Shell" + c.endpoint = endpoint + c.msiType = msiTypeCloudShell + } else { + setIMDSRetryOptionDefaults(&cp.Retry) + } + c.pipeline = runtime.NewPipeline(component, version, runtime.PipelineOptions{}, &cp) + + if log.Should(EventAuthentication) { + log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env) + } + + return &c, nil +} + +// provideToken acquires a token for MSAL's confidential.Client, which caches the token +func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) { + result := confidential.TokenProviderResult{} + tk, err := c.authenticate(ctx, c.id, params.Scopes) + if err == nil { + result.AccessToken = tk.Token + result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds()) + } + return result, err +} + +// authenticate acquires an access token +func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { + msg, err := c.createAuthRequest(ctx, id, scopes) + if err != nil { + return azcore.AccessToken{}, err + } + + resp, err := c.pipeline.Do(msg) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) + } + + if runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return c.createAccessToken(resp) + } + + if c.msiType == msiTypeIMDS && resp.StatusCode == 400 { + if id != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) + } + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "no default identity is assigned to this resource") + } + + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil) +} + +func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) { + value := struct { + // these are the only fields that we use + Token string `json:"access_token,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` + ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid + ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string + }{} + if err := runtime.UnmarshalAsJSON(res, 
&value); err != nil {
+		return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err)
+	}
+	if value.ExpiresIn != "" {
+		expiresIn, err := json.Number(value.ExpiresIn).Int64()
+		if err != nil {
+			return azcore.AccessToken{}, err
+		}
+		return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil
+	}
+	switch v := value.ExpiresOn.(type) {
+	case float64:
+		return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil
+	case string:
+		if expiresOn, err := strconv.Atoi(v); err == nil {
+			return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil
+		}
+		return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res, nil)
+	default:
+		msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v)
+		return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res, nil)
+	}
+}
+
+func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
+	switch c.msiType {
+	case msiTypeIMDS:
+		return c.createIMDSAuthRequest(ctx, id, scopes)
+	case msiTypeAppService:
+		return c.createAppServiceAuthRequest(ctx, id, scopes)
+	case msiTypeAzureArc:
+		// need to perform a preliminary request to retrieve the secret key challenge provided by the HIMDS service
+		key, err := c.getAzureArcSecretKey(ctx, scopes)
+		if err != nil {
+			msg := fmt.Sprintf("failed to retrieve secret key from the identity endpoint: %v", err)
+			return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err)
+		}
+		return c.createAzureArcAuthRequest(ctx, id, scopes, key)
+	case msiTypeServiceFabric:
+		return c.createServiceFabricAuthRequest(ctx, id, scopes)
+	case msiTypeCloudShell:
+		return c.createCloudShellAuthRequest(ctx, id, scopes)
+	default:
+		return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment")
+	}
+}
+
+func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
+	request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	request.Raw().Header.Set(headerMetadata, "true")
+	q := request.Raw().URL.Query()
+	q.Add("api-version", imdsAPIVersion)
+	q.Add("resource", strings.Join(scopes, " "))
+	if id != nil {
+		if id.idKind() == miResourceID {
+			q.Add(qpResID, id.String())
+		} else {
+			q.Add(qpClientID, id.String())
+		}
+	}
+	request.Raw().URL.RawQuery = q.Encode()
+	return request, nil
+}
+
+func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
+	request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader))
+	q := request.Raw().URL.Query()
+	q.Add("api-version", "2019-08-01")
+	q.Add("resource", scopes[0])
+	if id != nil {
+		if id.idKind() == miResourceID {
+			q.Add(qpResID, id.String())
+		} else {
+			q.Add(qpClientID, id.String())
+		}
+	}
+	request.Raw().URL.RawQuery = q.Encode()
+	return request, nil
+}
+
+func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
+	request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	q := request.Raw().URL.Query()
+	request.Raw().Header.Set("Accept", "application/json")
+	request.Raw().Header.Set("Secret", os.Getenv(identityHeader))
+	q.Add("api-version", serviceFabricAPIVersion)
+	q.Add("resource", strings.Join(scopes, " "))
+	if id != nil {
+		log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime")
+		if id.idKind() == miResourceID {
+			q.Add(qpResID, id.String())
+		} else {
+			q.Add(qpClientID, id.String())
+		}
+	}
+	request.Raw().URL.RawQuery = q.Encode()
+	return request, nil
+}
+
+func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) {
+	// create the request to retrieve the secret key challenge provided by the HIMDS service
+	request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint)
+	if err != nil {
+		return "", err
+	}
+	request.Raw().Header.Set(headerMetadata, "true")
+	q := request.Raw().URL.Query()
+	q.Add("api-version", azureArcAPIVersion)
+	q.Add("resource", strings.Join(resources, " "))
+	request.Raw().URL.RawQuery = q.Encode()
+	// send the initial request to get the short-lived secret key
+	response, err := c.pipeline.Do(request)
+	if err != nil {
+		return "", err
+	}
+	// the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location
+	// of the secret key file. Any other status code indicates an error in the request.
+	if response.StatusCode != 401 {
+		msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode)
+		return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response, nil)
+	}
+	header := response.Header.Get("WWW-Authenticate")
+	if len(header) == 0 {
+		return "", errors.New("did not receive a value from WWW-Authenticate header")
+	}
+	// the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key
+	pos := strings.LastIndex(header, "=")
+	if pos == -1 {
+		return "", fmt.Errorf("did not receive a correct value from WWW-Authenticate header: %s", header)
+	}
+	key, err := os.ReadFile(header[pos+1:])
+	if err != nil {
+		return "", fmt.Errorf("could not read file (%s) contents: %v", header[pos+1:], err)
+	}
+	return string(key), nil
+}
+
+func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) {
+	request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	request.Raw().Header.Set(headerMetadata, "true")
+	request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key))
+	q := request.Raw().URL.Query()
+	q.Add("api-version", azureArcAPIVersion)
+	q.Add("resource", strings.Join(resources, " "))
+	if id != nil {
+		log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities")
+		if id.idKind() == miResourceID {
+			q.Add(qpResID, id.String())
+		} else {
+			q.Add(qpClientID, id.String())
+		}
+	}
+	request.Raw().URL.RawQuery = q.Encode()
+	return request, nil
+}
+
+func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
+	request, err := runtime.NewRequest(ctx, http.MethodPost, c.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	request.Raw().Header.Set(headerMetadata, "true")
+	data := url.Values{}
+	data.Set("resource", strings.Join(scopes, " "))
+	dataEncoded := data.Encode()
+	body :=
streaming.NopCloser(strings.NewReader(dataEncoded)) + if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil { + return nil, err + } + if id != nil { + log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities") + q := request.Raw().URL.Query() + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + return request, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go new file mode 100644 index 00000000..c6710ae5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -0,0 +1,127 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameManagedIdentity = "ManagedIdentityCredential" + +type managedIdentityIDKind int + +const ( + miClientID managedIdentityIDKind = 0 + miResourceID managedIdentityIDKind = 1 +) + +// ManagedIDKind identifies the ID of a managed identity as either a client or resource ID +type ManagedIDKind interface { + fmt.Stringer + idKind() managedIdentityIDKind +} + +// ClientID is the client ID of a user-assigned managed identity. +type ClientID string + +func (ClientID) idKind() managedIdentityIDKind { + return miClientID +} + +// String returns the string value of the ID. +func (c ClientID) String() string { + return string(c) +} + +// ResourceID is the resource ID of a user-assigned managed identity. +type ResourceID string + +func (ResourceID) idKind() managedIdentityIDKind { + return miResourceID +} + +// String returns the string value of the ID. +func (r ResourceID) String() string { + return string(r) +} + +// ManagedIdentityCredentialOptions contains optional parameters for ManagedIdentityCredential. +type ManagedIdentityCredentialOptions struct { + azcore.ClientOptions + + // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity + // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that + // some platforms don't accept resource IDs. + ID ManagedIDKind +} + +// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. +// This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a +// user-assigned identity. See Azure Active Directory documentation for more information about managed identities: +// https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +type ManagedIdentityCredential struct { + client confidentialClient + mic *managedIdentityClient + s *syncer +} + +// NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options. 
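// A minimal sketch of selecting a user-assigned identity via the ID option defined below,
// by client ID or by resource ID. Both ID values are placeholders, and note the code above
// warns that some hosting environments don't support one or the other.

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// By client ID ...
	byClient, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
		ID: azidentity.ClientID("00000000-0000-0000-0000-000000000000"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// ... or by resource ID, where the hosting platform supports it.
	byResource, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
		ID: azidentity.ResourceID("/subscriptions/.../providers/Microsoft.ManagedIdentity/userAssignedIdentities/my-identity"),
	})
	if err != nil {
		log.Fatal(err)
	}
	_, _ = byClient, byResource
}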
+func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*ManagedIdentityCredential, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + mic, err := newManagedIdentityClient(options) + if err != nil { + return nil, err + } + cred := confidential.NewCredFromTokenProvider(mic.provideToken) + + // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key. + // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL. + clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY" + if options.ID != nil { + clientID = options.ID.String() + } + // similarly, it's okay to give MSAL an incorrect authority URL because that URL won't be used + c, err := confidential.New("https://login.microsoftonline.com/common", clientID, cred) + if err != nil { + return nil, err + } + m := ManagedIdentityCredential{client: c, mic: mic} + m.s = newSyncer(credNameManagedIdentity, "", nil, m.requestToken, m.silentAuth) + return &m, nil +} + +// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. +func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) != 1 { + err := errors.New(credNameManagedIdentity + ": GetToken() requires exactly one scope") + return azcore.AccessToken{}, err + } + // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here + opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} + return c.s.GetToken(ctx, opts) +} + +func (c *ManagedIdentityCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ManagedIdentityCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go new file mode 100644 index 00000000..3e173f47 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go @@ -0,0 +1,99 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "crypto" + "crypto/x509" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameOBO = "OnBehalfOfCredential" + +// OnBehalfOfCredential authenticates a service principal via the on-behalf-of flow. This is typically used by +// middle-tier services that authorize requests to other services with a delegated user identity. Because this +// is not an interactive authentication flow, an application using it must have admin consent for any delegated +// permissions before requesting tokens for them. 
See [Azure Active Directory documentation] for more details. +// +// [Azure Active Directory documentation]: https://docs.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow +type OnBehalfOfCredential struct { + assertion string + client confidentialClient + s *syncer +} + +// OnBehalfOfCredentialOptions contains optional parameters for OnBehalfOfCredential +type OnBehalfOfCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // SendCertificateChain applies only when the credential is configured to authenticate with a certificate. + // This setting controls whether the credential sends the public certificate chain in the x5c header of each + // token request's JWT. This is required for, and only used in, Subject Name/Issuer (SNI) authentication. + SendCertificateChain bool +} + +// NewOnBehalfOfCredentialWithCertificate constructs an OnBehalfOfCredential that authenticates with a certificate. +// See [ParseCertificates] for help loading a certificate. +func NewOnBehalfOfCredentialWithCertificate(tenantID, clientID, userAssertion string, certs []*x509.Certificate, key crypto.PrivateKey, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + cred, err := confidential.NewCredFromCert(certs, key) + if err != nil { + return nil, err + } + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + +// NewOnBehalfOfCredentialWithSecret constructs an OnBehalfOfCredential that authenticates with a client secret. +func NewOnBehalfOfCredentialWithSecret(tenantID, clientID, userAssertion, clientSecret string, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + cred, err := confidential.NewCredFromSecret(clientSecret) + if err != nil { + return nil, err + } + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + +func newOnBehalfOfCredential(tenantID, clientID, userAssertion string, cred confidential.Credential, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + if options == nil { + options = &OnBehalfOfCredentialOptions{} + } + opts := []confidential.Option{} + if options.SendCertificateChain { + opts = append(opts, confidential.WithX5C()) + } + opts = append(opts, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, opts...) + if err != nil { + return nil, err + } + obo := OnBehalfOfCredential{assertion: userAssertion, client: c} + obo.s = newSyncer(credNameOBO, tenantID, options.AdditionallyAllowedTenants, obo.requestToken, obo.requestToken) + return &obo, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
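// A minimal sketch of the on-behalf-of flow described above: a middle-tier service exchanges
// the token a client presented (the user assertion) for a downstream token. All identifiers,
// the assertion source, and the scope are hypothetical placeholders.

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	userAssertion := "<token the client presented to this service>"
	cred, err := azidentity.NewOnBehalfOfCredentialWithSecret("tenant-id", "client-id", userAssertion, "client-secret", nil)
	if err != nil {
		log.Fatal(err)
	}
	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://graph.microsoft.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = tk // call the downstream API with tk.Token
}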
+func (o *OnBehalfOfCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return o.s.GetToken(ctx, opts) +} + +func (o *OnBehalfOfCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := o.client.AcquireTokenOnBehalfOf(ctx, o.assertion, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*OnBehalfOfCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go new file mode 100644 index 00000000..ae385559 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go @@ -0,0 +1,130 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type authFn func(context.Context, policy.TokenRequestOptions) (azcore.AccessToken, error) + +// syncer synchronizes authentication calls so that goroutines can share a credential instance +type syncer struct { + addlTenants []string + authing bool + cond *sync.Cond + reqToken, silent authFn + name, tenant string +} + +func newSyncer(name, tenant string, additionalTenants []string, reqToken, silentAuth authFn) *syncer { + return &syncer{ + addlTenants: resolveAdditionalTenants(additionalTenants), + cond: &sync.Cond{L: &sync.Mutex{}}, + name: name, + reqToken: reqToken, + silent: silentAuth, + tenant: tenant, + } +} + +// GetToken ensures that only one goroutine authenticates at a time +func (s *syncer) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var at azcore.AccessToken + var err error + if len(opts.Scopes) == 0 { + return at, errors.New(s.name + ".GetToken() requires at least one scope") + } + // we don't resolve the tenant for managed identities because they can acquire tokens only from their home tenants + if s.name != credNameManagedIdentity { + tenant, err := s.resolveTenant(opts.TenantID) + if err != nil { + return at, err + } + opts.TenantID = tenant + } + auth := false + s.cond.L.Lock() + defer s.cond.L.Unlock() + for { + at, err = s.silent(ctx, opts) + if err == nil { + // got a token + break + } + if !s.authing { + // this goroutine will request a token + s.authing, auth = true, true + break + } + // another goroutine is acquiring a token; wait for it to finish, then try silent auth again + s.cond.Wait() + } + if auth { + s.authing = false + at, err = s.reqToken(ctx, opts) + s.cond.Broadcast() + } + if err != nil { + // Return credentialUnavailableError directly because that type affects the behavior of credential chains. + // Otherwise, return AuthenticationFailedError. 
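+		// (For example, a chained credential such as DefaultAzureCredential treats
+		// credentialUnavailableError as "try the next credential in the chain", whereas
+		// AuthenticationFailedError stops the chain.)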
+		var unavailableErr *credentialUnavailableError
+		if !errors.As(err, &unavailableErr) {
+			res := getResponseFromError(err)
+			err = newAuthenticationFailedError(s.name, err.Error(), res, err)
+		}
+	} else if log.Should(EventAuthentication) {
+		scope := strings.Join(opts.Scopes, ", ")
+		msg := fmt.Sprintf(`%s.GetToken() acquired a token for scope "%s"`, s.name, scope)
+		log.Write(EventAuthentication, msg)
+	}
+	return at, err
+}
+
+// resolveTenant returns the correct tenant for a token request given the credential's
+// configuration, or an error when the specified tenant isn't allowed by that configuration
+func (s *syncer) resolveTenant(requested string) (string, error) {
+	if requested == "" || requested == s.tenant {
+		return s.tenant, nil
+	}
+	if s.tenant == "adfs" {
+		return "", errors.New("ADFS doesn't support tenants")
+	}
+	if !validTenantID(requested) {
+		return "", errors.New(tenantIDValidationErr)
+	}
+	for _, t := range s.addlTenants {
+		if t == "*" || t == requested {
+			return requested, nil
+		}
+	}
+	return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant, add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, s.name, requested)
+}
+
+// resolveAdditionalTenants returns a copy of tenants, simplified when tenants contains a wildcard
+func resolveAdditionalTenants(tenants []string) []string {
+	if len(tenants) == 0 {
+		return nil
+	}
+	for _, t := range tenants {
+		// a wildcard makes all other values redundant
+		if t == "*" {
+			return []string{"*"}
+		}
+	}
+	cp := make([]string, len(tenants))
+	copy(cp, tenants)
+	return cp
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
new file mode 100644
index 00000000..8e652e33
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
@@ -0,0 +1,81 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
+)
+
+const credNameUserPassword = "UsernamePasswordCredential"
+
+// UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential.
+type UsernamePasswordCredentialOptions struct {
+	azcore.ClientOptions
+
+	// AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens.
+	// Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
+	// application is registered.
+	AdditionallyAllowedTenants []string
+	// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
+	// private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata
+	// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
+	// the application responsible for ensuring the configured authority is valid and trustworthy.
+	DisableInstanceDiscovery bool
+}
+
+// UsernamePasswordCredential authenticates a user with a password.
Microsoft doesn't recommend this kind of authentication, +// because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible +// with any form of multi-factor authentication, and the application must already have user or admin consent. +// This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts. +type UsernamePasswordCredential struct { + account public.Account + client publicClient + password, username string + s *syncer +} + +// NewUsernamePasswordCredential creates a UsernamePasswordCredential. clientID is the ID of the application the user +// will authenticate to. Pass nil for options to accept defaults. +func NewUsernamePasswordCredential(tenantID string, clientID string, username string, password string, options *UsernamePasswordCredentialOptions) (*UsernamePasswordCredential, error) { + if options == nil { + options = &UsernamePasswordCredentialOptions{} + } + c, err := getPublicClient(clientID, tenantID, &options.ClientOptions, public.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + if err != nil { + return nil, err + } + upc := UsernamePasswordCredential{client: c, password: password, username: username} + upc.s = newSyncer(credNameUserPassword, tenantID, options.AdditionallyAllowedTenants, upc.requestToken, upc.silentAuth) + return &upc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *UsernamePasswordCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByUsernamePassword(ctx, opts.Scopes, c.username, c.password, public.WithTenantID(opts.TenantID)) + if err == nil { + c.account = ar.Account + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *UsernamePasswordCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, + public.WithSilentAccount(c.account), + public.WithTenantID(opts.TenantID), + ) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go new file mode 100644 index 00000000..1a526b2e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -0,0 +1,15 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +const ( + // UserAgent is the string to be used in the user agent string when making requests. + component = "azidentity" + + // Version is the semantic version (see http://semver.org) of this module. 
+ version = "v1.3.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go new file mode 100644 index 00000000..7bfb3436 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -0,0 +1,126 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "os" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +const credNameWorkloadIdentity = "WorkloadIdentityCredential" + +// WorkloadIdentityCredential supports Azure workload identity on Kubernetes. +// See [Azure Kubernetes Service documentation] for more information. +// +// [Azure Kubernetes Service documentation]: https://learn.microsoft.com/azure/aks/workload-identity-overview +type WorkloadIdentityCredential struct { + assertion, file string + cred *ClientAssertionCredential + expires time.Time + mtx *sync.RWMutex +} + +// WorkloadIdentityCredentialOptions contains optional parameters for WorkloadIdentityCredential. +type WorkloadIdentityCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. + ClientID string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID. + TenantID string + // TokenFilePath is the path a file containing the workload identity token. Defaults to the value of the + // environment variable AZURE_FEDERATED_TOKEN_FILE. + TokenFilePath string +} + +// NewWorkloadIdentityCredential constructs a WorkloadIdentityCredential. Service principal configuration is read +// from environment variables as set by the Azure workload identity webhook. Set options to override those values. +func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) (*WorkloadIdentityCredential, error) { + if options == nil { + options = &WorkloadIdentityCredentialOptions{} + } + ok := false + clientID := options.ClientID + if clientID == "" { + if clientID, ok = os.LookupEnv(azureClientID); !ok { + return nil, errors.New("no client ID specified. Check pod configuration or set ClientID in the options") + } + } + file := options.TokenFilePath + if file == "" { + if file, ok = os.LookupEnv(azureFederatedTokenFile); !ok { + return nil, errors.New("no token file specified. 
Check pod configuration or set TokenFilePath in the options") + } + } + tenantID := options.TenantID + if tenantID == "" { + if tenantID, ok = os.LookupEnv(azureTenantID); !ok { + return nil, errors.New("no tenant ID specified. Check pod configuration or set TenantID in the options") + } + } + w := WorkloadIdentityCredential{file: file, mtx: &sync.RWMutex{}} + caco := ClientAssertionCredentialOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewClientAssertionCredential(tenantID, clientID, w.getAssertion, &caco) + if err != nil { + return nil, err + } + // we want "WorkloadIdentityCredential" in log messages, not "ClientAssertionCredential" + cred.s.name = credNameWorkloadIdentity + w.cred = cred + return &w, nil +} + +// GetToken requests an access token from Azure Active Directory. Azure SDK clients call this method automatically. +func (w *WorkloadIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return w.cred.GetToken(ctx, opts) +} + +// getAssertion returns the specified file's content, which is expected to be a Kubernetes service account token. +// Kubernetes is responsible for updating the file as service account tokens expire. +func (w *WorkloadIdentityCredential) getAssertion(context.Context) (string, error) { + w.mtx.RLock() + if w.expires.Before(time.Now()) { + // ensure only one goroutine at a time updates the assertion + w.mtx.RUnlock() + w.mtx.Lock() + defer w.mtx.Unlock() + // double check because another goroutine may have acquired the write lock first and done the update + if now := time.Now(); w.expires.Before(now) { + content, err := os.ReadFile(w.file) + if err != nil { + return "", err + } + w.assertion = string(content) + // Kubernetes rotates service account tokens when they reach 80% of their total TTL. The shortest TTL + // is 1 hour. That implies the token we just read is valid for at least 12 minutes (20% of 1 hour), + // but we add some margin for safety. + w.expires = now.Add(10 * time.Minute) + } + } else { + defer w.mtx.RUnlock() + } + return w.assertion, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt new file mode 100644 index 00000000..48ea6616 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go
new file mode 100644
index 00000000..245af7d2
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go
@@ -0,0 +1,51 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package diag
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+// Caller returns the file and line number of a frame on the caller's stack.
+// If the function fails an empty string is returned.
+// skipFrames - the number of frames to skip when determining the caller.
+// Passing a value of 0 will return the immediate caller of this function.
+func Caller(skipFrames int) string {
+	if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok {
+		// the skipFrames + 1 is to skip ourselves
+		frame := runtime.FuncForPC(pc)
+		return fmt.Sprintf("%s()\n\t%s:%d", frame.Name(), file, line)
+	}
+	return ""
+}
+
+// StackTrace returns a formatted stack trace string.
+// If the function fails an empty string is returned.
+// skipFrames - the number of stack frames to skip before composing the trace string.
+// totalFrames - the maximum number of stack frames to include in the trace string.
+func StackTrace(skipFrames, totalFrames int) string {
+	pcCallers := make([]uintptr, totalFrames)
+	if frames := runtime.Callers(skipFrames, pcCallers); frames == 0 {
+		return ""
+	}
+	frames := runtime.CallersFrames(pcCallers)
+	sb := strings.Builder{}
+	for {
+		frame, more := frames.Next()
+		sb.WriteString(frame.Function)
+		sb.WriteString("()\n\t")
+		sb.WriteString(frame.File)
+		sb.WriteRune(':')
+		sb.WriteString(fmt.Sprintf("%d\n", frame.Line))
+		if !more {
+			break
+		}
+	}
+	return sb.String()
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go
new file mode 100644
index 00000000..66bf13e5
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go
@@ -0,0 +1,7 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package diag
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go
new file mode 100644
index 00000000..8c6eacb6
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go
@@ -0,0 +1,7 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package errorinfo
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
new file mode 100644
index 00000000..ade7b348
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
@@ -0,0 +1,16 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package errorinfo
+
+// NonRetriable represents a non-transient error.
This works in +// conjunction with the retry policy, indicating that the error condition +// is idempotent, so no retries will be attempted. +// Use errors.As() to access this interface in the error chain. +type NonRetriable interface { + error + NonRetriable() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go new file mode 100644 index 00000000..d4ed6ccc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go @@ -0,0 +1,124 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "io" + "net/http" +) + +// HasStatusCode returns true if the Response's status code is one of the specified values. +// Exported as runtime.HasStatusCode(). +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +// PayloadOptions contains the optional values for the Payload func. +// NOT exported but used by azcore. +type PayloadOptions struct { + // BytesModifier receives the downloaded byte slice and returns an updated byte slice. + // Use this to modify the downloaded bytes in a payload (e.g. removing a BOM). + BytesModifier func([]byte) []byte +} + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +// Exported as runtime.Payload() WITHOUT the opts parameter. +func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) { + modifyBytes := func(b []byte) []byte { return b } + if opts != nil && opts.BytesModifier != nil { + modifyBytes = opts.BytesModifier + } + + // r.Body won't be a nopClosingBytesReader if downloading was skipped + if buf, ok := resp.Body.(*nopClosingBytesReader); ok { + bytesBody := modifyBytes(buf.Bytes()) + buf.Set(bytesBody) + return bytesBody, nil + } + + bytesBody, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, err + } + + bytesBody = modifyBytes(bytesBody) + resp.Body = &nopClosingBytesReader{s: bytesBody} + return bytesBody, nil +} + +// PayloadDownloaded returns true if the response body has already been downloaded. +// This implies that the Payload() func above has been previously called. +// NOT exported but used by azcore. +func PayloadDownloaded(resp *http.Response) bool { + _, ok := resp.Body.(*nopClosingBytesReader) + return ok +} + +// nopClosingBytesReader is an io.ReadSeekCloser around a byte slice. +// It also provides direct access to the byte slice to avoid rereading. +type nopClosingBytesReader struct { + s []byte + i int64 +} + +// Bytes returns the underlying byte slice. +func (r *nopClosingBytesReader) Bytes() []byte { + return r.s +} + +// Close implements the io.Closer interface. +func (*nopClosingBytesReader) Close() error { + return nil +} + +// Read implements the io.Reader interface. +func (r *nopClosingBytesReader) Read(b []byte) (n int, err error) { + if r.i >= int64(len(r.s)) { + return 0, io.EOF + } + n = copy(b, r.s[r.i:]) + r.i += int64(n) + return +} + +// Set replaces the existing byte slice with the specified byte slice and resets the reader. +func (r *nopClosingBytesReader) Set(b []byte) { + r.s = b + r.i = 0 +} + +// Seek implements the io.Seeker interface. 
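+// A minimal caller-side sketch (assumed code): because the cached body is a seeker, an
+// already-read payload can be replayed, e.g. before retrying a request:
+//
+//	if rs, ok := resp.Body.(io.ReadSeeker); ok {
+//		_, _ = rs.Seek(0, io.SeekStart) // rewind to the start of the cached payload
+//	}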
+func (r *nopClosingBytesReader) Seek(offset int64, whence int) (int64, error) {
+	var i int64
+	switch whence {
+	case io.SeekStart:
+		i = offset
+	case io.SeekCurrent:
+		i = r.i + offset
+	case io.SeekEnd:
+		i = int64(len(r.s)) + offset
+	default:
+		return 0, errors.New("nopClosingBytesReader: invalid whence")
+	}
+	if i < 0 {
+		return 0, errors.New("nopClosingBytesReader: negative position")
+	}
+	r.i = i
+	return i, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go
new file mode 100644
index 00000000..d7876d29
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go
@@ -0,0 +1,7 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package log
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
new file mode 100644
index 00000000..4f1dcf1b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
@@ -0,0 +1,104 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package log
+
+import (
+	"fmt"
+	"os"
+	"time"
+)
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Event is used to group entries. Each group can be toggled on or off.
+type Event string
+
+// SetEvents is used to control which events are written to
+// the log. By default all log events are written.
+func SetEvents(cls ...Event) {
+	log.cls = cls
+}
+
+// SetListener will set the Logger to write to the specified listener.
+func SetListener(lst func(Event, string)) {
+	log.lst = lst
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// END PUBLIC SURFACE AREA
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Should returns true if the specified log event should be written to the log.
+// By default all log events will be logged. Call SetEvents() to limit
+// the log events for logging.
+// If no listener has been set this will return false.
+// Calling this method is useful when the message to log is computationally expensive
+// and you want to avoid the overhead if its log event is not enabled.
+func Should(cls Event) bool {
+	if log.lst == nil {
+		return false
+	}
+	if len(log.cls) == 0 {
+		return true
+	}
+	for _, c := range log.cls {
+		if c == cls {
+			return true
+		}
+	}
+	return false
+}
+
+// Write invokes the underlying listener with the specified event and message.
+// If the event shouldn't be logged or there is no listener then Write does nothing.
+func Write(cls Event, message string) {
+	if !Should(cls) {
+		return
+	}
+	log.lst(cls, message)
+}
+
+// Writef invokes the underlying listener with the specified event and formatted message.
+// If the event shouldn't be logged or there is no listener then Writef does nothing.
+func Writef(cls Event, format string, a ...interface{}) {
+	if !Should(cls) {
+		return
+	}
+	log.lst(cls, fmt.Sprintf(format, a...))
+}
+
+// TestResetEvents is used for TESTING PURPOSES ONLY.
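+//
+// A minimal listener setup sketch for this package (assumed application-side code):
+//
+//	log.SetListener(func(e log.Event, msg string) {
+//		fmt.Printf("[%v] %s\n", e, msg)
+//	})
+//	log.SetEvents(log.Event("Retry")) // write only "Retry" events
+//	log.Writef(log.Event("Retry"), "attempt %d", 2)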
+func TestResetEvents() {
+	log.cls = nil
+}
+
+// logger controls which events to log and writes them to the underlying log.
+type logger struct {
+	cls []Event
+	lst func(Event, string)
+}
+
+// the process-wide logger
+var log logger
+
+func init() {
+	initLogging()
+}
+
+// split out for testing purposes
+func initLogging() {
+	if cls := os.Getenv("AZURE_SDK_GO_LOGGING"); cls == "all" {
+		// cls could be enhanced to support a comma-delimited list of log events
+		log.lst = func(cls Event, msg string) {
+			// simple console logger, it writes to stderr in the following format:
+			// [time-stamp] Event: message
+			fmt.Fprintf(os.Stderr, "[%s] %s: %s\n", time.Now().Format(time.StampMicro), cls, msg)
+		}
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go
new file mode 100644
index 00000000..db826962
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go
@@ -0,0 +1,155 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package poller
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
+)
+
+// the well-known set of LRO status/provisioning state values.
+const (
+	StatusSucceeded  = "Succeeded"
+	StatusCanceled   = "Canceled"
+	StatusFailed     = "Failed"
+	StatusInProgress = "InProgress"
+)
+
+// these are non-conformant states that we've seen in the wild.
+// we support them for back-compat.
+const (
+	StatusCancelled = "Cancelled"
+	StatusCompleted = "Completed"
+)
+
+// IsTerminalState returns true if the LRO's state is terminal.
+func IsTerminalState(s string) bool {
+	return Failed(s) || Succeeded(s)
+}
+
+// Failed returns true if the LRO's state is terminal failure.
+func Failed(s string) bool {
+	return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled) || strings.EqualFold(s, StatusCancelled)
+}
+
+// Succeeded returns true if the LRO's state is terminal success.
+func Succeeded(s string) bool {
+	return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusCompleted)
+}
+
+// StatusCodeValid returns true if the LRO response contains a valid HTTP status code.
+func StatusCodeValid(resp *http.Response) bool {
+	return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent)
+}
+
+// IsValidURL verifies that the URL is valid and absolute.
+func IsValidURL(s string) bool {
+	u, err := url.Parse(s)
+	return err == nil && u.IsAbs()
+}
+
+// ErrNoBody is returned if the response didn't contain a body.
+var ErrNoBody = errors.New("the response did not contain a body")
+
+// GetJSON reads the response body into a raw JSON object.
+// It returns ErrNoBody if there was no content.
+func GetJSON(resp *http.Response) (map[string]any, error) {
+	body, err := exported.Payload(resp, nil)
+	if err != nil {
+		return nil, err
+	}
+	if len(body) == 0 {
+		return nil, ErrNoBody
+	}
+	// unmarshal the body to get the value
+	var jsonBody map[string]any
+	if err = json.Unmarshal(body, &jsonBody); err != nil {
+		return nil, err
+	}
+	return jsonBody, nil
+}
+
+// provisioningState returns the provisioning state from the response or the empty string.
+func provisioningState(jsonBody map[string]any) string { + jsonProps, ok := jsonBody["properties"] + if !ok { + return "" + } + props, ok := jsonProps.(map[string]any) + if !ok { + return "" + } + rawPs, ok := props["provisioningState"] + if !ok { + return "" + } + ps, ok := rawPs.(string) + if !ok { + return "" + } + return ps +} + +// status returns the status from the response or the empty string. +func status(jsonBody map[string]any) string { + rawStatus, ok := jsonBody["status"] + if !ok { + return "" + } + status, ok := rawStatus.(string) + if !ok { + return "" + } + return status +} + +// GetStatus returns the LRO's status from the response body. +// Typically used for Azure-AsyncOperation flows. +// If there is no status in the response body the empty string is returned. +func GetStatus(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return status(jsonBody), nil +} + +// GetProvisioningState returns the LRO's state from the response body. +// If there is no state in the response body the empty string is returned. +func GetProvisioningState(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return provisioningState(jsonBody), nil +} + +// GetResourceLocation returns the LRO's resourceLocation value from the response body. +// Typically used for Operation-Location flows. +// If there is no resourceLocation in the response body the empty string is returned. +func GetResourceLocation(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + v, ok := jsonBody["resourceLocation"] + if !ok { + // it might be ok if the field doesn't exist, the caller must make that determination + return "", nil + } + vv, ok := v.(string) + if !ok { + return "", fmt.Errorf("the resourceLocation value %v was not in string format", v) + } + return vv, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go new file mode 100644 index 00000000..238ef42e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -0,0 +1,123 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package temporal + +import ( + "sync" + "time" +) + +// AcquireResource abstracts a method for refreshing a temporal resource. +type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error) + +// Resource is a temporal resource (usually a credential) that requires periodic refreshing. 
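+//
+// A minimal usage sketch (assumed caller code; the acquire callback fabricates a value
+// that expires in an hour):
+//
+//	res := temporal.NewResource(func(state string) (string, time.Time, error) {
+//		return "value-for-" + state, time.Now().Add(time.Hour), nil
+//	})
+//	v, err := res.Get("key") // refreshes only when the value is missing or near expiry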
+type Resource[TResource, TState any] struct { + // cond is used to synchronize access to the shared resource embodied by the remaining fields + cond *sync.Cond + + // acquiring indicates that some thread/goroutine is in the process of acquiring/updating the resource + acquiring bool + + // resource contains the value of the shared resource + resource TResource + + // expiration indicates when the shared resource expires; it is 0 if the resource was never acquired + expiration time.Time + + // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource + lastAttempt time.Time + + // acquireResource is the callback function that actually acquires the resource + acquireResource AcquireResource[TResource, TState] +} + +// NewResource creates a new Resource that uses the specified AcquireResource for refreshing. +func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] { + return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar} +} + +// Get returns the underlying resource. +// If the resource is fresh, no refresh is performed. +func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { + // If the resource is expiring within this time window, update it eagerly. + // This allows other threads/goroutines to keep running by using the not-yet-expired + // resource value while one thread/goroutine updates the resource. + const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration + const backoff = 30 * time.Second // Minimum wait time between eager update attempts + + now, acquire, expired := time.Now(), false, false + + // acquire exclusive lock + er.cond.L.Lock() + resource := er.resource + + for { + expired = er.expiration.IsZero() || er.expiration.Before(now) + if expired { + // The resource was never acquired or has expired + if !er.acquiring { + // If another thread/goroutine is not acquiring/updating the resource, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // Getting here means that this thread/goroutine will wait for the updated resource + } else if er.expiration.Add(-window).Before(now) { + // The resource is valid but is expiring within the time window + if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) { + // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted + // to do so within the last 30 seconds, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // This thread/goroutine will use the existing resource value while another updates it + resource = er.resource + break + } else { + // The resource is not close to expiring, this thread/goroutine should use its current value + resource = er.resource + break + } + // If we get here, wait for the new resource value to be acquired/updated + er.cond.Wait() + } + er.cond.L.Unlock() // Release the lock so no threads/goroutines are blocked + + var err error + if acquire { + // This thread/goroutine has been selected to acquire/update the resource + var expiration time.Time + var newValue TResource + er.lastAttempt = now + newValue, expiration, err = er.acquireResource(state) + + // Atomically, update the shared resource's new value & expiration. 
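+		// (Note: the acquire callback above deliberately ran outside the lock so other
+		// goroutines could keep using the still-valid value; the lock is re-taken here
+		// only to publish the result.)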
+ er.cond.L.Lock() + if err == nil { + // Update resource & expiration, return the new value + resource = newValue + er.resource, er.expiration = resource, expiration + } else if !expired { + // An eager update failed. Discard the error and return the current--still valid--resource value + err = nil + } + er.acquiring = false // Indicate that no thread/goroutine is currently acquiring the resource + + // Wake up any waiting threads/goroutines since there is a resource they can ALL use + er.cond.L.Unlock() + er.cond.Broadcast() + } + return resource, err // Return the resource this thread/goroutine can use +} + +// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get(). +func (er *Resource[TResource, TState]) Expire() { + er.cond.L.Lock() + defer er.cond.L.Unlock() + + // Reset the expiration as if we never got this resource to begin with + er.expiration = time.Time{} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go new file mode 100644 index 00000000..a3824bee --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go new file mode 100644 index 00000000..278ac9cd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go @@ -0,0 +1,76 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid + +import ( + "crypto/rand" + "errors" + "fmt" + "strconv" +) + +// The UUID reserved variants. +const ( + reservedRFC4122 byte = 0x40 +) + +// A UUID representation compliant with specification in RFC4122 document. +type UUID [16]byte + +// New returns a new UUID using the RFC4122 algorithm. +func New() (UUID, error) { + u := UUID{} + // Set all bits to pseudo-random values. + // NOTE: this takes a process-wide lock + _, err := rand.Read(u[:]) + if err != nil { + return u, err + } + u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) + + var version byte = 4 + u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) + return u, nil +} + +// String returns the UUID in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format. +func (u UUID) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// Parse parses a string formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +// or "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" into a UUID. 
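+// For example (assumed value):
+//
+//	u, err := uuid.Parse("123e4567-e89b-12d3-a456-426614174000")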
+func Parse(s string) (UUID, error) { + var uuid UUID + // ensure format + switch len(s) { + case 36: + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 38: + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + s = s[1:37] + default: + return uuid, errors.New("invalid UUID format") + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + // parse chunks + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + b, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return uuid, fmt.Errorf("invalid UUID format: %s", err) + } + uuid[i] = byte(b) + } + return uuid, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE new file mode 100644 index 00000000..3d8b93bc --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go new file mode 100644 index 00000000..19210883 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package cache allows third parties to implement external storage for caching token data +for distributed systems or multiple local applications access. + +The data stored and extracted will represent the entire cache. Therefore it is recommended +one msal instance per user. This data is considered opaque and there are no guarantees to +implementers on the format being passed. +*/ +package cache + +import "context" + +// Marshaler marshals data from an internal cache to bytes that can be stored. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Unmarshaler unmarshals data from a storage medium into the internal cache, overwriting it. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Serializer can serialize the cache to binary or from binary into the cache. +type Serializer interface { + Marshaler + Unmarshaler +} + +// ExportHints are suggestions for storing data. 
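+//
+// A minimal file-backed ExportReplace sketch (assumed code; a real implementation would
+// also need locking, retries, and protection of the data at rest):
+//
+//	type fileCache struct{ path string }
+//
+//	func (f fileCache) Replace(ctx context.Context, c cache.Unmarshaler, h cache.ReplaceHints) error {
+//		data, err := os.ReadFile(f.path)
+//		if err != nil {
+//			return err
+//		}
+//		return c.Unmarshal(data)
+//	}
+//
+//	func (f fileCache) Export(ctx context.Context, c cache.Marshaler, h cache.ExportHints) error {
+//		data, err := c.Marshal()
+//		if err != nil {
+//			return err
+//		}
+//		return os.WriteFile(f.path, data, 0600)
+//	}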
+type ExportHints struct {
+	// PartitionKey is a suggested key for partitioning the cache
+	PartitionKey string
+}
+
+// ReplaceHints are suggestions for loading data.
+type ReplaceHints struct {
+	// PartitionKey is a suggested key for partitioning the cache
+	PartitionKey string
+}
+
+// ExportReplace exports and replaces in-memory cache data. It doesn't support nil Context or
+// define the outcome of passing one. A Context without a timeout must receive a default timeout
+// specified by the implementor. Retries must be implemented inside the implementation.
+type ExportReplace interface {
+	// Replace replaces the cache with what is in external storage. Implementors should honor
+	// Context cancellations and return context.Canceled or context.DeadlineExceeded in those cases.
+	Replace(ctx context.Context, cache Unmarshaler, hints ReplaceHints) error
+	// Export writes the binary representation of the cache (cache.Marshal()) to external storage.
+	// This is considered opaque. Context cancellations should be honored as in Replace.
+	Export(ctx context.Context, cache Marshaler, hints ExportHints) error
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
new file mode 100644
index 00000000..6612feb4
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
@@ -0,0 +1,685 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package confidential provides a client for authentication of "confidential" applications.
+A "confidential" application is defined as an app that runs on servers. Such apps are
+considered difficult to access and are therefore capable of keeping an application secret.
+Confidential clients can hold configuration-time secrets.
+*/
+package confidential
+
+import (
+	"context"
+	"crypto"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/pem"
+	"errors"
+	"fmt"
+
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+/*
+Design note:
+
+confidential.Client uses base.Client as an embedded type. base.Client statically assigns its attributes
+during creation. As it doesn't have any pointers in it, anything borrowed from it, such as
+Base.AuthParams, is a copy that is free to be manipulated here.
+
+Duplicate calls shared between public.Client and this package:
+There are some duplicate call options provided here that are the same as in public.Client. This
+is a design choice. Go proverb (https://www.youtube.com/watch?v=PAAkCSZUG1c&t=9m28s):
+"a little copying is better than a little dependency". Yes, we could have another package with
+shared options (fail). That divides just two options from all the others, which makes the user look
+through more docs. We can have all clients in one package, but I think separate packages
+here make for better naming (public.Client vs client.PublicClient). So I chose a little
+duplication.
+
+.Net people, take note on X509:
+This uses x509.Certificates and private keys. x509 does not store private keys. .Net
+has some x509.Certificate2 thing that has private keys, but that is just some bullcrap that .Net
+added, it doesn't exist in real life. As such I've put a PEM decoder into here.
+*/
+
+// TODO(msal): This should have example code for each method on client using Go's example doc framework.
+// base usage details should be included in the package documentation.
+
+// AuthResult contains the results of one token acquisition operation.
+// For details see https://aka.ms/msal-net-authenticationresult
+type AuthResult = base.AuthResult
+
+type Account = shared.Account
+
+// CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. The file
+// must contain the public certificate and the private key. If a PEM block is encrypted and
+// password is not an empty string, it attempts to decrypt the PEM blocks using the password.
+// Multiple certs are due to certificate chaining for use cases like TLS that sign from root to leaf.
+func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.PrivateKey, error) {
+	var certs []*x509.Certificate
+	var priv crypto.PrivateKey
+	for {
+		block, rest := pem.Decode(pemData)
+		if block == nil {
+			break
+		}
+
+		//nolint:staticcheck // x509.IsEncryptedPEMBlock and x509.DecryptPEMBlock are deprecated. They are used here only to support a use case.
+		if x509.IsEncryptedPEMBlock(block) {
+			b, err := x509.DecryptPEMBlock(block, []byte(password))
+			if err != nil {
+				return nil, nil, fmt.Errorf("could not decrypt encrypted PEM block: %v", err)
+			}
+			block, _ = pem.Decode(b)
+			if block == nil {
+				return nil, nil, fmt.Errorf("encountered an encrypted PEM block that did not decode")
+			}
+		}
+
+		switch block.Type {
+		case "CERTIFICATE":
+			cert, err := x509.ParseCertificate(block.Bytes)
+			if err != nil {
+				return nil, nil, fmt.Errorf("block labelled 'CERTIFICATE' could not be parsed by x509: %v", err)
+			}
+			certs = append(certs, cert)
+		case "PRIVATE KEY":
+			if priv != nil {
+				return nil, nil, errors.New("found multiple private key blocks")
+			}
+
+			var err error
+			priv, err = x509.ParsePKCS8PrivateKey(block.Bytes)
+			if err != nil {
+				return nil, nil, fmt.Errorf("could not decode private key: %v", err)
+			}
+		case "RSA PRIVATE KEY":
+			if priv != nil {
+				return nil, nil, errors.New("found multiple private key blocks")
+			}
+			var err error
+			priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+			if err != nil {
+				return nil, nil, fmt.Errorf("could not decode private key: %v", err)
+			}
+		}
+		pemData = rest
+	}
+
+	if len(certs) == 0 {
+		return nil, nil, fmt.Errorf("no certificates found")
+	}
+
+	if priv == nil {
+		return nil, nil, fmt.Errorf("no private key found")
+	}
+
+	return certs, priv, nil
+}
+
+// AssertionRequestOptions has required information for client assertion claims
+type AssertionRequestOptions = exported.AssertionRequestOptions
+
+// Credential represents the credential used in confidential client flows.
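+//
+// A minimal sketch of building a certificate Credential from a PEM file (assumed file
+// name; empty password):
+//
+//	pemData, err := os.ReadFile("service-principal.pem")
+//	if err != nil {
+//		// handle the error
+//	}
+//	certs, key, err := confidential.CertFromPEM(pemData, "")
+//	if err != nil {
+//		// handle the error
+//	}
+//	cred, err := confidential.NewCredFromCert(certs, key)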
+type Credential struct { + secret string + + cert *x509.Certificate + key crypto.PrivateKey + x5c []string + + assertionCallback func(context.Context, AssertionRequestOptions) (string, error) + + tokenProvider func(context.Context, TokenProviderParameters) (TokenProviderResult, error) +} + +// toInternal returns the accesstokens.Credential that is used internally. The current structure of the +// code requires that client.go, requests.go and confidential.go share a credential type without +// having import recursion. That requires the type used between is in a shared package. Therefore +// we have this. +func (c Credential) toInternal() (*accesstokens.Credential, error) { + if c.secret != "" { + return &accesstokens.Credential{Secret: c.secret}, nil + } + if c.cert != nil { + if c.key == nil { + return nil, errors.New("missing private key for certificate") + } + return &accesstokens.Credential{Cert: c.cert, Key: c.key, X5c: c.x5c}, nil + } + if c.key != nil { + return nil, errors.New("missing certificate for private key") + } + if c.assertionCallback != nil { + return &accesstokens.Credential{AssertionCallback: c.assertionCallback}, nil + } + if c.tokenProvider != nil { + return &accesstokens.Credential{TokenProvider: c.tokenProvider}, nil + } + return nil, errors.New("invalid credential") +} + +// NewCredFromSecret creates a Credential from a secret. +func NewCredFromSecret(secret string) (Credential, error) { + if secret == "" { + return Credential{}, errors.New("secret can't be empty string") + } + return Credential{secret: secret}, nil +} + +// NewCredFromAssertionCallback creates a Credential that invokes a callback to get assertions +// authenticating the application. The callback must be thread safe. +func NewCredFromAssertionCallback(callback func(context.Context, AssertionRequestOptions) (string, error)) Credential { + return Credential{assertionCallback: callback} +} + +// NewCredFromCert creates a Credential from a certificate or chain of certificates and an RSA private key +// as returned by [CertFromPEM]. +func NewCredFromCert(certs []*x509.Certificate, key crypto.PrivateKey) (Credential, error) { + cred := Credential{key: key} + k, ok := key.(*rsa.PrivateKey) + if !ok { + return cred, errors.New("key must be an RSA key") + } + for _, cert := range certs { + if cert == nil { + // not returning an error here because certs may still contain a sufficient cert/key pair + continue + } + certKey, ok := cert.PublicKey.(*rsa.PublicKey) + if ok && k.E == certKey.E && k.N.Cmp(certKey.N) == 0 { + // We know this is the signing cert because its public key matches the given private key. + // This cert must be first in x5c. + cred.cert = cert + cred.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cred.x5c...) + } else { + cred.x5c = append(cred.x5c, base64.StdEncoding.EncodeToString(cert.Raw)) + } + } + if cred.cert == nil { + return cred, errors.New("key doesn't match any certificate") + } + return cred, nil +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters = exported.TokenProviderParameters + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult = exported.TokenProviderResult + +// NewCredFromTokenProvider creates a Credential from a function that provides access tokens. The function +// must be concurrency safe. This is intended only to allow the Azure SDK to cache MSI tokens. 
It isn't
+// useful to applications in general because the token provider must implement all authentication logic.
+func NewCredFromTokenProvider(provider func(context.Context, TokenProviderParameters) (TokenProviderResult, error)) Credential {
+	return Credential{tokenProvider: provider}
+}
+
+// AutoDetectRegion instructs MSAL Go to auto detect region for Azure regional token service.
+func AutoDetectRegion() string {
+	return "TryAutoDetect"
+}
+
+// Client is a representation of an authentication client for confidential applications as defined in the
+// package doc. A new Client should be created PER SERVICE USER.
+// For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications
+type Client struct {
+	base base.Client
+	cred *accesstokens.Credential
+}
+
+// clientOptions are optional settings for New(). These options are set using various functions
+// returning Option calls.
+type clientOptions struct {
+	accessor                          cache.ExportReplace
+	authority, azureRegion            string
+	capabilities                      []string
+	disableInstanceDiscovery, sendX5C bool
+	httpClient                        ops.HTTPClient
+}
+
+// Option is an optional argument to New().
+type Option func(o *clientOptions)
+
+// WithCache provides an accessor that will read and write authentication data to an externally managed cache.
+func WithCache(accessor cache.ExportReplace) Option {
+	return func(o *clientOptions) {
+		o.accessor = accessor
+	}
+}
+
+// WithClientCapabilities allows configuring one or more client capabilities such as "CP1"
+func WithClientCapabilities(capabilities []string) Option {
+	return func(o *clientOptions) {
+		// there's no danger of sharing the slice's underlying memory with the application because
+		// this slice is simply passed to base.WithClientCapabilities, which copies its data
+		o.capabilities = capabilities
+	}
+}
+
+// WithHTTPClient allows for a custom HTTP client to be set.
+func WithHTTPClient(httpClient ops.HTTPClient) Option {
+	return func(o *clientOptions) {
+		o.httpClient = httpClient
+	}
+}
+
+// WithX5C specifies whether the x5c claim (public key of the certificate) should be sent to the STS
+// to enable Subject Name/Issuer authentication.
+func WithX5C() Option {
+	return func(o *clientOptions) {
+		o.sendX5C = true
+	}
+}
+
+// WithInstanceDiscovery, when called with false, disables authority validation (to support private cloud scenarios).
+func WithInstanceDiscovery(enabled bool) Option {
+	return func(o *clientOptions) {
+		o.disableInstanceDiscovery = !enabled
+	}
+}
+
+// WithAzureRegion sets the region (preferred) or Confidential.AutoDetectRegion() for auto-detecting the region.
+// Region names as per https://azure.microsoft.com/en-ca/global-infrastructure/geographies/.
+// See https://aka.ms/region-map for more details on region names.
+// The region value should be the short region name for the region where the service is deployed.
+// For example, "centralus" is the short name for the region Central US.
+// Not all auth flows can use the regional token service.
+// Service To Service (client credential flow) tokens can be obtained from the regional service.
+// Requires configuration at the tenant level.
+// Auto-detection works on a limited number of Azure artifacts (VMs, Azure functions).
+// If auto-detection fails, the non-regional endpoint will be used.
+// If an invalid region name is provided, the non-regional endpoint MIGHT be used or the token request MIGHT fail.
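+//
+// For example (assumed region; authority, clientID, and cred as in New's documentation):
+//
+//	client, err := confidential.New(authority, clientID, cred,
+//		confidential.WithAzureRegion("centralus"))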
+func WithAzureRegion(val string) Option { + return func(o *clientOptions) { + o.azureRegion = val + } +} + +// New is the constructor for Client. authority is the URL of a token authority such as "https://login.microsoftonline.com/". +// If the Client will connect directly to AD FS, use "adfs" for the tenant. clientID is the application's client ID (also called its +// "application ID"). +func New(authority, clientID string, cred Credential, options ...Option) (Client, error) { + internalCred, err := cred.toInternal() + if err != nil { + return Client{}, err + } + + opts := clientOptions{ + authority: authority, + // if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache + disableInstanceDiscovery: cred.tokenProvider != nil, + httpClient: shared.DefaultClient, + } + for _, o := range options { + o(&opts) + } + baseOpts := []base.Option{ + base.WithCacheAccessor(opts.accessor), + base.WithClientCapabilities(opts.capabilities), + base.WithInstanceDiscovery(!opts.disableInstanceDiscovery), + base.WithRegionDetection(opts.azureRegion), + base.WithX5C(opts.sendX5C), + } + base, err := base.New(clientID, opts.authority, oauth.New(opts.httpClient), baseOpts...) + if err != nil { + return Client{}, err + } + base.AuthParams.IsConfidentialClient = true + + return Client{base: base, cred: internalCred}, nil +} + +// authCodeURLOptions contains options for AuthCodeURL +type authCodeURLOptions struct { + claims, loginHint, tenantID, domainHint string +} + +// AuthCodeURLOption is implemented by options for AuthCodeURL +type AuthCodeURLOption interface { + authCodeURLOption() +} + +// AuthCodeURL creates a URL used to acquire an authorization code. Users need to call CreateAuthorizationCodeURLParameters and pass it in. +// +// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID] +func (cca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) { + o := authCodeURLOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return "", err + } + ap, err := cca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return "", err + } + ap.Claims = o.claims + ap.LoginHint = o.loginHint + ap.DomainHint = o.domainHint + return cca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap) +} + +// WithLoginHint pre-populates the login prompt with a username. +func WithLoginHint(username string) interface { + AuthCodeURLOption + options.CallOption +} { + return struct { + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.loginHint = username + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url. +func WithDomainHint(domain string) interface { + AuthCodeURLOption + options.CallOption +} { + return struct { + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.domainHint = domain + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithClaims sets additional claims to request for the token, such as those required by conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. 
The argument must be decoded. +// This option is valid for any token acquisition method. +func WithClaims(claims string) interface { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.claims = claims + case *acquireTokenByCredentialOptions: + t.claims = claims + case *acquireTokenOnBehalfOfOptions: + t.claims = claims + case *acquireTokenSilentOptions: + t.claims = claims + case *authCodeURLOptions: + t.claims = claims + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New]. +// This option is valid for any token acquisition method. +func WithTenantID(tenantID string) interface { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.tenantID = tenantID + case *acquireTokenByCredentialOptions: + t.tenantID = tenantID + case *acquireTokenOnBehalfOfOptions: + t.tenantID = tenantID + case *acquireTokenSilentOptions: + t.tenantID = tenantID + case *authCodeURLOptions: + t.tenantID = tenantID + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// acquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. +// These are set by using various AcquireTokenSilentOption functions. +type acquireTokenSilentOptions struct { + account Account + claims, tenantID string +} + +// AcquireSilentOption is implemented by options for AcquireTokenSilent +type AcquireSilentOption interface { + acquireSilentOption() +} + +// WithSilentAccount uses the passed account during an AcquireTokenSilent() call. +func WithSilentAccount(account Account) interface { + AcquireSilentOption + options.CallOption +} { + return struct { + AcquireSilentOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.account = account + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenSilent acquires a token from either the cache or using a refresh token. 
+//
+// Options: [WithClaims], [WithSilentAccount], [WithTenantID]
+func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) {
+	o := acquireTokenSilentOptions{}
+	if err := options.ApplyOptions(&o, opts); err != nil {
+		return AuthResult{}, err
+	}
+
+	if o.claims != "" {
+		return AuthResult{}, errors.New("call another AcquireToken method to request a new token having these claims")
+	}
+
+	silentParameters := base.AcquireTokenSilentParameters{
+		Scopes:      scopes,
+		Account:     o.account,
+		RequestType: accesstokens.ATConfidential,
+		Credential:  cca.cred,
+		IsAppCache:  o.account.IsZero(),
+		TenantID:    o.tenantID,
+	}
+
+	return cca.base.AcquireTokenSilent(ctx, silentParameters)
+}
+
+// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
+type acquireTokenByAuthCodeOptions struct {
+	challenge, claims, tenantID string
+}
+
+// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode
+type AcquireByAuthCodeOption interface {
+	acquireByAuthCodeOption()
+}
+
+// WithChallenge allows you to provide a challenge for an AcquireTokenByAuthCode() call.
+func WithChallenge(challenge string) interface {
+	AcquireByAuthCodeOption
+	options.CallOption
+} {
+	return struct {
+		AcquireByAuthCodeOption
+		options.CallOption
+	}{
+		CallOption: options.NewCallOption(
+			func(a any) error {
+				switch t := a.(type) {
+				case *acquireTokenByAuthCodeOptions:
+					t.challenge = challenge
+				default:
+					return fmt.Errorf("unexpected options type %T", a)
+				}
+				return nil
+			},
+		),
+	}
+}
+
+// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code.
+// The specified redirect URI must be the same URI that was used when the authorization code was requested.
+//
+// Options: [WithChallenge], [WithClaims], [WithTenantID]
+func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) {
+	o := acquireTokenByAuthCodeOptions{}
+	if err := options.ApplyOptions(&o, opts); err != nil {
+		return AuthResult{}, err
+	}
+
+	params := base.AcquireTokenAuthCodeParameters{
+		Scopes:      scopes,
+		Code:        code,
+		Challenge:   o.challenge,
+		Claims:      o.claims,
+		AppType:     accesstokens.ATConfidential,
+		Credential:  cca.cred, // This setting differs from public.Client.AcquireTokenByAuthCode
+		RedirectURI: redirectURI,
+		TenantID:    o.tenantID,
+	}
+
+	return cca.base.AcquireTokenByAuthCode(ctx, params)
+}
+
+// acquireTokenByCredentialOptions contains optional configuration for AcquireTokenByCredential
+type acquireTokenByCredentialOptions struct {
+	claims, tenantID string
+}
+
+// AcquireByCredentialOption is implemented by options for AcquireTokenByCredential
+type AcquireByCredentialOption interface {
+	acquireByCredOption()
+}
+
+// AcquireTokenByCredential acquires a security token from the authority, using the client credentials grant.
+//
+// Options: [WithClaims], [WithTenantID]
+func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string, opts ...AcquireByCredentialOption) (AuthResult, error) {
+	o := acquireTokenByCredentialOptions{}
+	err := options.ApplyOptions(&o, opts)
+	if err != nil {
+		return AuthResult{}, err
+	}
+	authParams, err := cca.base.AuthParams.WithTenant(o.tenantID)
+	if err != nil {
+		return AuthResult{}, err
+	}
+	authParams.Scopes = scopes
+	authParams.AuthorizationType = authority.ATClientCredentials
+	authParams.Claims = o.claims
+
+	token, err := cca.base.Token.Credential(ctx, authParams, cca.cred)
+	if err != nil {
+		return AuthResult{}, err
+	}
+	return cca.base.AuthResultFromToken(ctx, authParams, token, true)
+}
+
+// acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf
+type acquireTokenOnBehalfOfOptions struct {
+	claims, tenantID string
+}
+
+// AcquireOnBehalfOfOption is implemented by options for AcquireTokenOnBehalfOf
+type AcquireOnBehalfOfOption interface {
+	acquireOBOOption()
+}
+
+// AcquireTokenOnBehalfOf acquires a security token for an app using a middle-tier app's access token.
+// Refer to https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow.
+//
+// Options: [WithClaims], [WithTenantID]
+func (cca Client) AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, opts ...AcquireOnBehalfOfOption) (AuthResult, error) {
+	o := acquireTokenOnBehalfOfOptions{}
+	if err := options.ApplyOptions(&o, opts); err != nil {
+		return AuthResult{}, err
+	}
+	params := base.AcquireTokenOnBehalfOfParameters{
+		Scopes:        scopes,
+		UserAssertion: userAssertion,
+		Claims:        o.claims,
+		Credential:    cca.cred,
+		TenantID:      o.tenantID,
+	}
+	return cca.base.AcquireTokenOnBehalfOf(ctx, params)
+}
+
+// Account gets the account in the token cache with the specified homeAccountID.
+func (cca Client) Account(ctx context.Context, accountID string) (Account, error) {
+	return cca.base.Account(ctx, accountID)
+}
+
+// RemoveAccount signs the account out and forgets account from token cache.
+func (cca Client) RemoveAccount(ctx context.Context, account Account) error {
+	return cca.base.RemoveAccount(ctx, account)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md
new file mode 100644
index 00000000..7ef7862f
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md
@@ -0,0 +1,111 @@
+# MSAL Error Design
+
+Author: Abhidnya Patil (abhidnya.patil@microsoft.com)
+
+Contributors:
+
+- John Doak (jdoak@microsoft.com)
+- Keegan Caruso (Keegan.Caruso@microsoft.com)
+- Joel Hendrix (jhendrix@microsoft.com)
+
+## Background
+
+Errors in MSAL are intended for app developers to troubleshoot, not for displaying to end-users.
+
+### Go error handling vs other MSAL languages
+
+Most modern languages use exception-based errors. Simply put, you "throw" an exception and it must be caught at some routine in the upper stack or it will eventually crash the program.
+
+Go doesn't use exceptions; instead it relies on multiple return values, one of which can be the builtin error interface type. It is up to the user to decide what to do.
+
+### Go custom error types
+
+Errors can be created in Go by simply using errors.New() or fmt.Errorf() to create an "error".
+
+Custom errors can be created in multiple ways.
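Taken together, the methods above support the standard daemon-app pattern: try the cache with AcquireTokenSilent and fall back to AcquireTokenByCredential on a miss. Below is a minimal sketch of that pattern; the authority URL tenant, client ID, and secret are placeholder values, and error handling is reduced to panics for brevity.

```go
package main

import (
	"context"
	"fmt"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

func main() {
	// Placeholder credentials; use your app registration's values.
	cred, err := confidential.NewCredFromSecret("client-secret")
	if err != nil {
		panic(err)
	}
	client, err := confidential.New("https://login.microsoftonline.com/your-tenant", "client-id", cred)
	if err != nil {
		panic(err)
	}

	scopes := []string{"https://graph.microsoft.com/.default"}
	ctx := context.Background()

	// The cache is empty on first use, so fall back to the client credentials grant.
	ar, err := client.AcquireTokenSilent(ctx, scopes)
	if err != nil {
		if ar, err = client.AcquireTokenByCredential(ctx, scopes); err != nil {
			panic(err)
		}
	}
	fmt.Println("token expires at:", ar.ExpiresOn)
}
```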
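For instance, a sentinel error created with errors.New can be wrapped with fmt.Errorf's %w verb and still be detected later with errors.Is, all without defining a custom type (a standard-library-only illustration, not code from this library):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

func lookup(key string) error {
	// Wrapping with %w keeps errNotFound detectable via errors.Is.
	return fmt.Errorf("lookup(%q): %w", key, errNotFound)
}

func main() {
	err := lookup("token")
	fmt.Println(errors.Is(err, errNotFound)) // true
}
```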
+One of the more robust ways is simply to satisfy the error interface:
+
+```go
+type MyCustomErr struct {
+	Msg string
+}
+func (m MyCustomErr) Error() string { // This implements "error"
+	return m.Msg
+}
+```
+
+### MSAL Error Goals
+
+- Provide diagnostics to the user and for tickets that can be used to track down bugs or client misconfigurations
+- Detect errors that are transitory and can be retried
+- Allow the user to identify certain errors that the program can respond to, such as informing the user of the need to do an enrollment
+
+## Implementing Client Side Errors
+
+Client side errors indicate a non-recoverable misconfiguration or passing of bad arguments. Retrying isn't possible.
+
+These errors can simply be standard Go errors created by errors.New() or fmt.Errorf(). If down the line we need a custom error, we can introduce it, but for now the error messages just need to be clear on what the issue was.
+
+## Implementing Service Side Errors
+
+Service side errors occur when an external RPC responds either with an HTTP error code or returns a message that includes an error.
+
+These errors can be transitory (please slow down) or permanent (HTTP 404). To meet our diagnostic goals, we require the ability to differentiate these errors from other errors.
+
+The current implementation includes a specialized type that captures any error from the server:
+
+```go
+// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the
+// http.Request and Response objects. Implements error.
+type CallErr struct {
+	Req  *http.Request
+	Resp *http.Response
+	Err  error
+}
+
+// Error implements error.Error().
+func (e CallErr) Error() string {
+	return e.Err.Error()
+}
+
+// Verbose prints a verbose error message with the request or response.
+func (e CallErr) Verbose() string {
+	e.Resp.Request = nil // This brings in a bunch of TLS stuff we don't need
+	e.Resp.TLS = nil     // Same
+	return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp))
+}
+```
+
+A user will always receive the most concise error we provide. They can tell if it is a server side error using Go's errors package:
+
+```go
+var callErr CallErr
+if errors.As(err, &callErr) {
+	...
+}
+```
+
+We provide a Verbose() function that can retrieve the most verbose message from any error we provide:
+
+```go
+fmt.Println(errors.Verbose(err))
+```
+
+If further differentiation is required, we can add custom errors that use Go error wrapping on top of CallErr to achieve our diagnostic goals (such as detecting when to retry a call due to transient errors).
+
+CallErr is always returned from the comm package (which handles all HTTP requests) and looks similar to:
+
+```go
+return nil, errors.CallErr{
+	Req:  req,
+	Resp: reply,
+	Err:  fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, ErrorResponse), // ErrorResponse is the json body extracted from the http response
+}
+```
+
+## Future Decisions
+
+The ability to retry calls needs to have centralized responsibility. Either the user is doing it or the client is doing it.
+
+If the user should be responsible, our errors package will include a CanRetry() function that will inform the user if the error provided to them is retryable. This is based on the HTTP error code and possibly the type of error that was returned. It would also include a sleep time if the server returned an amount of time to wait. A sketch of what that could look like follows.
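A hypothetical shape for that helper, assuming the CallErr type above plus net/http, strconv, and time. CanRetry does not exist in the package today, and the set of retryable status codes and the Retry-After parsing shown here are assumptions:

```go
// CanRetry reports whether err looks retryable and, if the server supplied a
// Retry-After value in seconds, how long to sleep first. This is a sketch, not shipped code.
func CanRetry(err error) (sleep time.Duration, ok bool) {
	var callErr CallErr
	if !errors.As(err, &callErr) || callErr.Resp == nil {
		return 0, false
	}
	switch callErr.Resp.StatusCode {
	case http.StatusTooManyRequests, http.StatusServiceUnavailable, http.StatusGatewayTimeout:
		if ra := callErr.Resp.Header.Get("Retry-After"); ra != "" {
			if secs, err := strconv.Atoi(ra); err == nil {
				sleep = time.Duration(secs) * time.Second
			}
		}
		return sleep, true
	}
	return 0, false
}
```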
+
+Otherwise we will do this internally and retries will be left to us.
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
new file mode 100644
index 00000000..c9b8dbed
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
@@ -0,0 +1,89 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package errors
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"reflect"
+	"strings"
+
+	"github.com/kylelemons/godebug/pretty"
+)
+
+var prettyConf = &pretty.Config{
+	IncludeUnexported: false,
+	SkipZeroFields:    true,
+	TrackCycles:       true,
+	Formatter: map[reflect.Type]interface{}{
+		reflect.TypeOf((*io.Reader)(nil)).Elem(): func(r io.Reader) string {
+			b, err := io.ReadAll(r)
+			if err != nil {
+				return "could not read io.Reader content"
+			}
+			return string(b)
+		},
+	},
+}
+
+type verboser interface {
+	Verbose() string
+}
+
+// Verbose prints the most verbose error that the error message has.
+func Verbose(err error) string {
+	build := strings.Builder{}
+	for {
+		if err == nil {
+			break
+		}
+		if v, ok := err.(verboser); ok {
+			build.WriteString(v.Verbose())
+		} else {
+			build.WriteString(err.Error())
+		}
+		err = errors.Unwrap(err)
+	}
+	return build.String()
+}
+
+// New is equivalent to errors.New().
+func New(text string) error {
+	return errors.New(text)
+}
+
+// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the
+// http.Request and Response objects. Implements error.
+type CallErr struct {
+	Req *http.Request
+	// Resp contains response body
+	Resp *http.Response
+	Err  error
+}
+
+// Error implements error.Error().
+func (e CallErr) Error() string {
+	return e.Err.Error()
+}
+
+// Verbose prints a verbose error message with the request or response.
+func (e CallErr) Verbose() string {
+	e.Resp.Request = nil // This brings in a bunch of TLS stuff we don't need
+	e.Resp.TLS = nil     // Same
+	return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp))
+}
+
+// Is reports whether any error in errors chain matches target.
+func Is(err, target error) bool {
+	return errors.Is(err, target)
+}
+
+// As finds the first error in errors chain that matches target,
+// and if so, sets target to that error value and returns true.
+// Otherwise, it returns false.
+func As(err error, target interface{}) bool {
+	return errors.As(err, target)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
new file mode 100644
index 00000000..5f68384f
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
@@ -0,0 +1,467 @@
+// Package base contains a "Base" client that is used by the external public.Client and confidential.Client.
+// Base holds shared attributes that must be available to both clients and methods that act as
+// shared calls.
+package base + +import ( + "context" + "errors" + "fmt" + "net/url" + "reflect" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +const ( + // AuthorityPublicCloud is the default AAD authority host + AuthorityPublicCloud = "https://login.microsoftonline.com/common" + scopeSeparator = " " +) + +// manager provides an internal cache. It is defined to allow faking the cache in tests. +// In production it's a *storage.Manager or *storage.PartitionedManager. +type manager interface { + cache.Serializer + Read(context.Context, authority.AuthParams) (storage.TokenResponse, error) + Write(authority.AuthParams, accesstokens.TokenResponse) (shared.Account, error) +} + +// accountManager is a manager that also caches accounts. In production it's a *storage.Manager. +type accountManager interface { + manager + AllAccounts() []shared.Account + Account(homeAccountID string) shared.Account + RemoveAccount(account shared.Account, clientID string) +} + +// AcquireTokenSilentParameters contains the parameters to acquire a token silently (from cache). +type AcquireTokenSilentParameters struct { + Scopes []string + Account shared.Account + RequestType accesstokens.AppType + Credential *accesstokens.Credential + IsAppCache bool + TenantID string + UserAssertion string + AuthorizationType authority.AuthorizeType + Claims string +} + +// AcquireTokenAuthCodeParameters contains the parameters required to acquire an access token using the auth code flow. +// To use PKCE, set the CodeChallengeParameter. +// Code challenges are used to secure authorization code grants; for more information, visit +// https://tools.ietf.org/html/rfc7636. +type AcquireTokenAuthCodeParameters struct { + Scopes []string + Code string + Challenge string + Claims string + RedirectURI string + AppType accesstokens.AppType + Credential *accesstokens.Credential + TenantID string +} + +type AcquireTokenOnBehalfOfParameters struct { + Scopes []string + Claims string + Credential *accesstokens.Credential + TenantID string + UserAssertion string +} + +// AuthResult contains the results of one token acquisition operation in PublicClientApplication +// or ConfidentialClientApplication. For details see https://aka.ms/msal-net-authenticationresult +type AuthResult struct { + Account shared.Account + IDToken accesstokens.IDToken + AccessToken string + ExpiresOn time.Time + GrantedScopes []string + DeclinedScopes []string +} + +// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache). 
+func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) {
+	if err := storageTokenResponse.AccessToken.Validate(); err != nil {
+		return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err)
+	}
+
+	account := storageTokenResponse.Account
+	accessToken := storageTokenResponse.AccessToken.Secret
+	grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator)
+
+	// Checking if there was an ID token in the cache; this will throw an error in the case of confidential client applications.
+	var idToken accesstokens.IDToken
+	if !storageTokenResponse.IDToken.IsZero() {
+		err := idToken.UnmarshalJSON([]byte(storageTokenResponse.IDToken.Secret))
+		if err != nil {
+			return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err)
+		}
+	}
+	return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil
+}
+
+// NewAuthResult creates an AuthResult.
+func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Account) (AuthResult, error) {
+	if len(tokenResponse.DeclinedScopes) > 0 {
+		return AuthResult{}, fmt.Errorf("token response failed because declined scopes are present: %s", strings.Join(tokenResponse.DeclinedScopes, ","))
+	}
+	return AuthResult{
+		Account:       account,
+		IDToken:       tokenResponse.IDToken,
+		AccessToken:   tokenResponse.AccessToken,
+		ExpiresOn:     tokenResponse.ExpiresOn.T,
+		GrantedScopes: tokenResponse.GrantedScopes.Slice,
+	}, nil
+}
+
+// Client is a base client that provides access to common methods and primitives that
+// can be used by multiple clients.
+type Client struct {
+	Token   *oauth.Client
+	manager accountManager // *storage.Manager or fakeManager in tests
+	// pmanager is a partitioned cache for OBO authentication. *storage.PartitionedManager or fakeManager in tests
+	pmanager manager
+
+	AuthParams      authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New().
+	cacheAccessor   cache.ExportReplace
+	cacheAccessorMu *sync.RWMutex
+}
+
+// Option is an optional argument to the New constructor.
+type Option func(c *Client) error
+
+// WithCacheAccessor allows you to set some type of cache for storing authentication tokens.
+func WithCacheAccessor(ca cache.ExportReplace) Option {
+	return func(c *Client) error {
+		if ca != nil {
+			c.cacheAccessor = ca
+		}
+		return nil
+	}
+}
+
+// WithClientCapabilities allows configuring one or more client capabilities such as "CP1".
+func WithClientCapabilities(capabilities []string) Option {
+	return func(c *Client) error {
+		if len(capabilities) > 0 {
+			cc, err := authority.NewClientCapabilities(capabilities)
+			if err != nil {
+				return err
+			}
+			c.AuthParams.Capabilities = cc
+		}
+		return nil
+	}
+}
+
+// WithKnownAuthorityHosts specifies hosts Client shouldn't validate or request metadata for because they're known to the user
+func WithKnownAuthorityHosts(hosts []string) Option {
+	return func(c *Client) error {
+		cp := make([]string, len(hosts))
+		copy(cp, hosts)
+		c.AuthParams.KnownAuthorityHosts = cp
+		return nil
+	}
+}
+
+// WithX5C specifies whether the x5c claim (the public key of the certificate) should be sent to the STS to enable Subject Name Issuer Authentication.
+func WithX5C(sendX5C bool) Option {
+	return func(c *Client) error {
+		c.AuthParams.SendX5C = sendX5C
+		return nil
+	}
+}
+
+func WithRegionDetection(region string) Option {
+	return func(c *Client) error {
+		c.AuthParams.AuthorityInfo.Region = region
+		return nil
+	}
+}
+
+func WithInstanceDiscovery(instanceDiscoveryEnabled bool) Option {
+	return func(c *Client) error {
+		c.AuthParams.AuthorityInfo.ValidateAuthority = instanceDiscoveryEnabled
+		c.AuthParams.AuthorityInfo.InstanceDiscoveryDisabled = !instanceDiscoveryEnabled
+		return nil
+	}
+}
+
+// New is the constructor for Base.
+func New(clientID string, authorityURI string, token *oauth.Client, options ...Option) (Client, error) {
+	// By default, validateAuthority is set to true and instanceDiscoveryDisabled is set to false
+	authInfo, err := authority.NewInfoFromAuthorityURI(authorityURI, true, false)
+	if err != nil {
+		return Client{}, err
+	}
+	authParams := authority.NewAuthParams(clientID, authInfo)
+	client := Client{ // Note: Hey, don't even THINK about making Base into *Base. See "design notes" in public.go and confidential.go
+		Token:           token,
+		AuthParams:      authParams,
+		cacheAccessorMu: &sync.RWMutex{},
+		manager:         storage.New(token),
+		pmanager:        storage.NewPartitionedManager(token),
+	}
+	for _, o := range options {
+		if err = o(&client); err != nil {
+			break
+		}
+	}
+	return client, err
+}
+
+// AuthCodeURL creates a URL used to acquire an authorization code.
+func (b Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, authParams authority.AuthParams) (string, error) {
+	endpoints, err := b.Token.ResolveEndpoints(ctx, authParams.AuthorityInfo, "")
+	if err != nil {
+		return "", err
+	}
+
+	baseURL, err := url.Parse(endpoints.AuthorizationEndpoint)
+	if err != nil {
+		return "", err
+	}
+
+	claims, err := authParams.MergeCapabilitiesAndClaims()
+	if err != nil {
+		return "", err
+	}
+
+	v := url.Values{}
+	v.Add("client_id", clientID)
+	v.Add("response_type", "code")
+	v.Add("redirect_uri", redirectURI)
+	v.Add("scope", strings.Join(scopes, scopeSeparator))
+	if authParams.State != "" {
+		v.Add("state", authParams.State)
+	}
+	if claims != "" {
+		v.Add("claims", claims)
+	}
+	if authParams.CodeChallenge != "" {
+		v.Add("code_challenge", authParams.CodeChallenge)
+	}
+	if authParams.CodeChallengeMethod != "" {
+		v.Add("code_challenge_method", authParams.CodeChallengeMethod)
+	}
+	if authParams.LoginHint != "" {
+		v.Add("login_hint", authParams.LoginHint)
+	}
+	if authParams.Prompt != "" {
+		v.Add("prompt", authParams.Prompt)
+	}
+	if authParams.DomainHint != "" {
+		v.Add("domain_hint", authParams.DomainHint)
+	}
+	// These were left over from an implementation that didn't use any of them. We may
+	// need to add them later, but they aren't needed as of now.
+ /* + if p.ResponseMode != "" { + urlParams.Add("response_mode", p.ResponseMode) + } + */ + baseURL.RawQuery = v.Encode() + return baseURL.String(), nil +} + +func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilentParameters) (AuthResult, error) { + ar := AuthResult{} + // when tenant == "", the caller didn't specify a tenant and WithTenant will choose the client's configured tenant + tenant := silent.TenantID + authParams, err := b.AuthParams.WithTenant(tenant) + if err != nil { + return ar, err + } + authParams.Scopes = silent.Scopes + authParams.HomeAccountID = silent.Account.HomeAccountID + authParams.AuthorizationType = silent.AuthorizationType + authParams.Claims = silent.Claims + authParams.UserAssertion = silent.UserAssertion + + m := b.pmanager + if authParams.AuthorizationType != authority.ATOnBehalfOf { + authParams.AuthorizationType = authority.ATRefreshToken + m = b.manager + } + if b.cacheAccessor != nil { + key := authParams.CacheKey(silent.IsAppCache) + b.cacheAccessorMu.RLock() + err = b.cacheAccessor.Replace(ctx, m, cache.ReplaceHints{PartitionKey: key}) + b.cacheAccessorMu.RUnlock() + } + if err != nil { + return ar, err + } + storageTokenResponse, err := m.Read(ctx, authParams) + if err != nil { + return ar, err + } + + // ignore cached access tokens when given claims + if silent.Claims == "" { + ar, err = AuthResultFromStorage(storageTokenResponse) + if err == nil { + return ar, err + } + } + + // redeem a cached refresh token, if available + if reflect.ValueOf(storageTokenResponse.RefreshToken).IsZero() { + return ar, errors.New("no token found") + } + var cc *accesstokens.Credential + if silent.RequestType == accesstokens.ATConfidential { + cc = silent.Credential + } + token, err := b.Token.Refresh(ctx, silent.RequestType, authParams, cc, storageTokenResponse.RefreshToken) + if err != nil { + return ar, err + } + return b.AuthResultFromToken(ctx, authParams, token, true) +} + +func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) { + authParams, err := b.AuthParams.WithTenant(authCodeParams.TenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Claims = authCodeParams.Claims + authParams.Scopes = authCodeParams.Scopes + authParams.Redirecturi = authCodeParams.RedirectURI + authParams.AuthorizationType = authority.ATAuthCode + + var cc *accesstokens.Credential + if authCodeParams.AppType == accesstokens.ATConfidential { + cc = authCodeParams.Credential + authParams.IsConfidentialClient = true + } + + req, err := accesstokens.NewCodeChallengeRequest(authParams, authCodeParams.AppType, cc, authCodeParams.Code, authCodeParams.Challenge) + if err != nil { + return AuthResult{}, err + } + + token, err := b.Token.AuthCode(ctx, req) + if err != nil { + return AuthResult{}, err + } + + return b.AuthResultFromToken(ctx, authParams, token, true) +} + +// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. 
+func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams AcquireTokenOnBehalfOfParameters) (AuthResult, error) { + var ar AuthResult + silentParameters := AcquireTokenSilentParameters{ + Scopes: onBehalfOfParams.Scopes, + RequestType: accesstokens.ATConfidential, + Credential: onBehalfOfParams.Credential, + UserAssertion: onBehalfOfParams.UserAssertion, + AuthorizationType: authority.ATOnBehalfOf, + TenantID: onBehalfOfParams.TenantID, + Claims: onBehalfOfParams.Claims, + } + ar, err := b.AcquireTokenSilent(ctx, silentParameters) + if err == nil { + return ar, err + } + authParams, err := b.AuthParams.WithTenant(onBehalfOfParams.TenantID) + if err != nil { + return AuthResult{}, err + } + authParams.AuthorizationType = authority.ATOnBehalfOf + authParams.Claims = onBehalfOfParams.Claims + authParams.Scopes = onBehalfOfParams.Scopes + authParams.UserAssertion = onBehalfOfParams.UserAssertion + token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential) + if err == nil { + ar, err = b.AuthResultFromToken(ctx, authParams, token, true) + } + return ar, err +} + +func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) { + if !cacheWrite { + return NewAuthResult(token, shared.Account{}) + } + var m manager = b.manager + if authParams.AuthorizationType == authority.ATOnBehalfOf { + m = b.pmanager + } + key := token.CacheKey(authParams) + if b.cacheAccessor != nil { + b.cacheAccessorMu.Lock() + defer b.cacheAccessorMu.Unlock() + err := b.cacheAccessor.Replace(ctx, m, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return AuthResult{}, err + } + } + account, err := m.Write(authParams, token) + if err != nil { + return AuthResult{}, err + } + ar, err := NewAuthResult(token, account) + if err == nil && b.cacheAccessor != nil { + err = b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key}) + } + return ar, err +} + +func (b Client) AllAccounts(ctx context.Context) ([]shared.Account, error) { + if b.cacheAccessor != nil { + b.cacheAccessorMu.RLock() + defer b.cacheAccessorMu.RUnlock() + key := b.AuthParams.CacheKey(false) + err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return nil, err + } + } + return b.manager.AllAccounts(), nil +} + +func (b Client) Account(ctx context.Context, homeAccountID string) (shared.Account, error) { + if b.cacheAccessor != nil { + b.cacheAccessorMu.RLock() + defer b.cacheAccessorMu.RUnlock() + authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer. + authParams.AuthorizationType = authority.AccountByID + authParams.HomeAccountID = homeAccountID + key := b.AuthParams.CacheKey(false) + err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return shared.Account{}, err + } + } + return b.manager.Account(homeAccountID), nil +} + +// RemoveAccount removes all the ATs, RTs and IDTs from the cache associated with this account. 
+func (b Client) RemoveAccount(ctx context.Context, account shared.Account) error {
+	if b.cacheAccessor == nil {
+		b.manager.RemoveAccount(account, b.AuthParams.ClientID)
+		return nil
+	}
+	b.cacheAccessorMu.Lock()
+	defer b.cacheAccessorMu.Unlock()
+	key := b.AuthParams.CacheKey(false)
+	err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key})
+	if err != nil {
+		return err
+	}
+	b.manager.RemoveAccount(account, b.AuthParams.ClientID)
+	return b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key})
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
new file mode 100644
index 00000000..548c2fae
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
@@ -0,0 +1,200 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+
+	internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+// Contract is the JSON structure that is written to any storage medium when serializing
+// the internal cache. This design is shared between MSAL versions in many languages.
+// This cannot be changed without design that includes other SDKs.
+type Contract struct {
+	AccessTokens  map[string]AccessToken               `json:"AccessToken,omitempty"`
+	RefreshTokens map[string]accesstokens.RefreshToken `json:"RefreshToken,omitempty"`
+	IDTokens      map[string]IDToken                   `json:"IdToken,omitempty"`
+	Accounts      map[string]shared.Account            `json:"Account,omitempty"`
+	AppMetaData   map[string]AppMetaData               `json:"AppMetadata,omitempty"`
+
+	AdditionalFields map[string]interface{}
+}
+
+// InMemoryContract is the partitioned, in-memory counterpart of Contract. It holds the same
+// kinds of entries as Contract, keyed first by partition.
+type InMemoryContract struct {
+	AccessTokensPartition  map[string]map[string]AccessToken
+	RefreshTokensPartition map[string]map[string]accesstokens.RefreshToken
+	IDTokensPartition      map[string]map[string]IDToken
+	AccountsPartition      map[string]map[string]shared.Account
+	AppMetaData            map[string]AppMetaData
+}
+
+// NewInMemoryContract is the constructor for InMemoryContract.
+func NewInMemoryContract() *InMemoryContract {
+	return &InMemoryContract{
+		AccessTokensPartition:  map[string]map[string]AccessToken{},
+		RefreshTokensPartition: map[string]map[string]accesstokens.RefreshToken{},
+		IDTokensPartition:      map[string]map[string]IDToken{},
+		AccountsPartition:      map[string]map[string]shared.Account{},
+		AppMetaData:            map[string]AppMetaData{},
+	}
+}
+
+// NewContract is the constructor for Contract.
+func NewContract() *Contract { + return &Contract{ + AccessTokens: map[string]AccessToken{}, + RefreshTokens: map[string]accesstokens.RefreshToken{}, + IDTokens: map[string]IDToken{}, + Accounts: map[string]shared.Account{}, + AppMetaData: map[string]AppMetaData{}, + AdditionalFields: map[string]interface{}{}, + } +} + +// AccessToken is the JSON representation of a MSAL access token for encoding to storage. +type AccessToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + Scopes string `json:"target,omitempty"` + ExpiresOn internalTime.Unix `json:"expires_on,omitempty"` + ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"` + CachedAt internalTime.Unix `json:"cached_at,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccessToken is the constructor for AccessToken. +func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token string) AccessToken { + return AccessToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "AccessToken", + ClientID: clientID, + Secret: token, + Scopes: scopes, + CachedAt: internalTime.Unix{T: cachedAt.UTC()}, + ExpiresOn: internalTime.Unix{T: expiresOn.UTC()}, + ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()}, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (a AccessToken) Key() string { + return strings.Join( + []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}, + shared.CacheKeySeparator, + ) +} + +// FakeValidate enables tests to fake access token validation +var FakeValidate func(AccessToken) error + +// Validate validates that this AccessToken can be used. +func (a AccessToken) Validate() error { + if FakeValidate != nil { + return FakeValidate(a) + } + if a.CachedAt.T.After(time.Now()) { + return errors.New("access token isn't valid, it was cached at a future time") + } + if a.ExpiresOn.T.Before(time.Now().Add(5 * time.Minute)) { + return fmt.Errorf("access token is expired") + } + if a.CachedAt.T.IsZero() { + return fmt.Errorf("access token does not have CachedAt set") + } + return nil +} + +// IDToken is the JSON representation of an MSAL id token for encoding to storage. +type IDToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + AdditionalFields map[string]interface{} +} + +// IsZero determines if IDToken is the zero value. +func (i IDToken) IsZero() bool { + v := reflect.ValueOf(i) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// NewIDToken is the constructor for IDToken. 
+func NewIDToken(homeID, env, realm, clientID, idToken string) IDToken { + return IDToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "IDToken", + ClientID: clientID, + Secret: idToken, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (id IDToken) Key() string { + return strings.Join( + []string{id.HomeAccountID, id.Environment, id.CredentialType, id.ClientID, id.Realm}, + shared.CacheKeySeparator, + ) +} + +// AppMetaData is the JSON representation of application metadata for encoding to storage. +type AppMetaData struct { + FamilyID string `json:"family_id,omitempty"` + ClientID string `json:"client_id,omitempty"` + Environment string `json:"environment,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAppMetaData is the constructor for AppMetaData. +func NewAppMetaData(familyID, clientID, environment string) AppMetaData { + return AppMetaData{ + FamilyID: familyID, + ClientID: clientID, + Environment: environment, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (a AppMetaData) Key() string { + return strings.Join( + []string{"AppMetaData", a.Environment, a.ClientID}, + shared.CacheKeySeparator, + ) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go new file mode 100644 index 00000000..87d7d797 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go @@ -0,0 +1,436 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// PartitionedManager is a partitioned in-memory cache of access tokens, accounts and meta data. +type PartitionedManager struct { + contract *InMemoryContract + contractMu sync.RWMutex + requests aadInstanceDiscoveryer // *oauth.Token + + aadCacheMu sync.RWMutex + aadCache map[string]authority.InstanceDiscoveryMetadata +} + +// NewPartitionedManager is the constructor for PartitionedManager. +func NewPartitionedManager(requests *oauth.Client) *PartitionedManager { + m := &PartitionedManager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)} + m.contract = NewInMemoryContract() + return m +} + +// Read reads a storage token from the cache if it exists. 
+func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + tr := TokenResponse{} + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + + // fetch metadata if instanceDiscovery is enabled + aliases := []string{authParameters.AuthorityInfo.Host} + if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases + } + + userAssertionHash := authParameters.AssertionHash() + partitionKeyFromRequest := userAssertionHash + + // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating + // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token. + accessToken, err := m.readAccessToken(aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest) + if err == nil { + tr.AccessToken = accessToken + } + idToken, err := m.readIDToken(aliases, realm, clientID, userAssertionHash, getPartitionKeyIDTokenRead(accessToken)) + if err == nil { + tr.IDToken = idToken + } + + if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil { + // we need the family ID to identify the correct refresh token, if any + familyID := appMetadata.FamilyID + refreshToken, err := m.readRefreshToken(aliases, familyID, clientID, userAssertionHash, partitionKeyFromRequest) + if err == nil { + tr.RefreshToken = refreshToken + } + } + + account, err := m.readAccount(aliases, realm, userAssertionHash, idToken.HomeAccountID) + if err == nil { + tr.Account = account + } + return tr, nil +} + +// Write writes a token response to the cache and returns the account information the token is stored with. +func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + userAssertionHash := authParameters.AssertionHash() + cachedAt := time.Now() + + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + refreshToken.UserAssertionHash = userAssertionHash + } + if err := m.writeRefreshToken(refreshToken, getPartitionKeyRefreshToken(refreshToken)); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + accessToken.UserAssertionHash = userAssertionHash // get Hash method on this + } + + // Since we have a valid access token, cache it before moving on. 
+ if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken, getPartitionKeyAccessToken(accessToken)); err != nil { + return account, err + } + } else { + return shared.Account{}, err + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + idToken.UserAssertionHash = userAssertionHash + } + if err := m.writeIDToken(idToken, getPartitionKeyIDToken(idToken)); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + preferredUsername := idTokenJwt.UPN + if idTokenJwt.PreferredUsername != "" { + preferredUsername = idTokenJwt.PreferredUsername + } + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + preferredUsername, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + account.UserAssertionHash = userAssertionHash + } + if err := m.writeAccount(account, getPartitionKeyAccount(account)); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *PartitionedManager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *PartitionedManager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *PartitionedManager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey string) (AccessToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + if accessTokens, ok := m.contract.AccessTokensPartition[partitionKey]; ok { + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. 
+ for _, at := range accessTokens { + if at.Realm == realm && at.ClientID == clientID && at.UserAssertionHash == userAssertionHash { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at, nil + } + } + } + } + } + return AccessToken{}, fmt.Errorf("access token not found") +} + +func (m *PartitionedManager) writeAccessToken(accessToken AccessToken, partitionKey string) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + if m.contract.AccessTokensPartition[partitionKey] == nil { + m.contract.AccessTokensPartition[partitionKey] = make(map[string]AccessToken) + } + m.contract.AccessTokensPartition[partitionKey][key] = accessToken + return nil +} + +func matchFamilyRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string, clientID string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *PartitionedManager) readRefreshToken(envAliases []string, familyID, clientID, userAssertionHash, partitionKey string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshTokenObo(rt, userAssertionHash, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshTokenObo(rt, userAssertionHash, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // TODO(keegan): All the tests here pass, but Bogdan says this is + // more complicated. I'm opening an issue for this to have him + // review the tests and suggest tests that would break this so + // we can re-write against good tests. His comments as follow: + // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is. + // The algorithm is: + // If application is NOT part of the family, search by client_ID + // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
+	// https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
+	m.contractMu.RLock()
+	defer m.contractMu.RUnlock()
+	for _, matcher := range matchers {
+		for _, rt := range m.contract.RefreshTokensPartition[partitionKey] {
+			if matcher(rt) {
+				return rt, nil
+			}
+		}
+	}
+
+	return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
+}
+
+func (m *PartitionedManager) writeRefreshToken(refreshToken accesstokens.RefreshToken, partitionKey string) error {
+	m.contractMu.Lock()
+	defer m.contractMu.Unlock()
+	key := refreshToken.Key()
+	if m.contract.RefreshTokensPartition[partitionKey] == nil {
+		m.contract.RefreshTokensPartition[partitionKey] = make(map[string]accesstokens.RefreshToken)
+	}
+	m.contract.RefreshTokensPartition[partitionKey][key] = refreshToken
+	return nil
+}
+
+func (m *PartitionedManager) readIDToken(envAliases []string, realm, clientID, userAssertionHash, partitionKey string) (IDToken, error) {
+	m.contractMu.RLock()
+	defer m.contractMu.RUnlock()
+	for _, idt := range m.contract.IDTokensPartition[partitionKey] {
+		if idt.Realm == realm && idt.ClientID == clientID && idt.UserAssertionHash == userAssertionHash {
+			if checkAlias(idt.Environment, envAliases) {
+				return idt, nil
+			}
+		}
+	}
+	return IDToken{}, fmt.Errorf("token not found")
+}
+
+func (m *PartitionedManager) writeIDToken(idToken IDToken, partitionKey string) error {
+	key := idToken.Key()
+	m.contractMu.Lock()
+	defer m.contractMu.Unlock()
+	if m.contract.IDTokensPartition[partitionKey] == nil {
+		m.contract.IDTokensPartition[partitionKey] = make(map[string]IDToken)
+	}
+	m.contract.IDTokensPartition[partitionKey][key] = idToken
+	return nil
+}
+
+func (m *PartitionedManager) readAccount(envAliases []string, realm, UserAssertionHash, partitionKey string) (shared.Account, error) {
+	m.contractMu.RLock()
+	defer m.contractMu.RUnlock()
+
+	// You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
+	// We only use a map because the storage contract shared between all language implementations says use a map.
+	// We can't change that. The other reason is that the keys are made using a specific "env", but here we are
+	// allowing a match in multiple envs (envAlias). That means we either need to hash each possible key and do
+	// the lookup or just statically check. Since the design is to have a storage.Manager per user, the number of
+	// keys stored is really low (say 2). Each hash is more expensive than the entire iteration.
+ for _, acc := range m.contract.AccountsPartition[partitionKey] { + if checkAlias(acc.Environment, envAliases) && acc.UserAssertionHash == UserAssertionHash && acc.Realm == realm { + return acc, nil + } + } + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *PartitionedManager) writeAccount(account shared.Account, partitionKey string) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + if m.contract.AccountsPartition[partitionKey] == nil { + m.contract.AccountsPartition[partitionKey] = make(map[string]shared.Account) + } + m.contract.AccountsPartition[partitionKey][key] = account + return nil +} + +func (m *PartitionedManager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + return app, nil + } + } + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *PartitionedManager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *PartitionedManager) update(cache *InMemoryContract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. +func (m *PartitionedManager) Marshal() ([]byte, error) { + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *PartitionedManager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewInMemoryContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} + +func getPartitionKeyAccessToken(item AccessToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyRefreshToken(item accesstokens.RefreshToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyIDToken(item IDToken) string { + return item.HomeAccountID +} + +func getPartitionKeyAccount(item shared.Account) string { + return item.HomeAccountID +} + +func getPartitionKeyIDTokenRead(item AccessToken) string { + return item.HomeAccountID +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go new file mode 100644 index 00000000..add75192 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -0,0 +1,517 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package storage holds all cached token information for MSAL. This storage can be +// augmented with third-party extensions to provide persistent storage. In that case, +// reads and writes in upper packages will call Marshal() to take the entire in-memory +// representation and write it to storage and Unmarshal() to update the entire in-memory +// storage with what was in the persistent storage. 
The persistent storage can only be +// accessed in this way because multiple MSAL clients written in multiple languages can +// access the same storage and must adhere to the same method that was defined +// previously. +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// aadInstanceDiscoveryer allows faking in tests. +// It is implemented in production by ops/authority.Client +type aadInstanceDiscoveryer interface { + AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) +} + +// TokenResponse mimics a token response that was pulled from the cache. +type TokenResponse struct { + RefreshToken accesstokens.RefreshToken + IDToken IDToken // *Credential + AccessToken AccessToken + Account shared.Account +} + +// Manager is an in-memory cache of access tokens, accounts and meta data. This data is +// updated on read/write calls. Unmarshal() replaces all data stored here with whatever +// was given to it on each call. +type Manager struct { + contract *Contract + contractMu sync.RWMutex + requests aadInstanceDiscoveryer // *oauth.Token + + aadCacheMu sync.RWMutex + aadCache map[string]authority.InstanceDiscoveryMetadata +} + +// New is the constructor for Manager. +func New(requests *oauth.Client) *Manager { + m := &Manager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)} + m.contract = NewContract() + return m +} + +func checkAlias(alias string, aliases []string) bool { + for _, v := range aliases { + if alias == v { + return true + } + } + return false +} + +func isMatchingScopes(scopesOne []string, scopesTwo string) bool { + newScopesTwo := strings.Split(scopesTwo, scopeSeparator) + scopeCounter := 0 + for _, scope := range scopesOne { + for _, otherScope := range newScopesTwo { + if strings.EqualFold(scope, otherScope) { + scopeCounter++ + continue + } + } + } + return scopeCounter == len(scopesOne) +} + +// Read reads a storage token from the cache if it exists. +func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + tr := TokenResponse{} + homeAccountID := authParameters.HomeAccountID + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + + // fetch metadata if instanceDiscovery is enabled + aliases := []string{authParameters.AuthorityInfo.Host} + if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases + } + + accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes) + tr.AccessToken = accessToken + + if homeAccountID == "" { + // caller didn't specify a user, so there's no reason to search for an ID or refresh token + return tr, nil + } + // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating + // TokenResponse fields so that e.g. 
lack of an ID token doesn't prevent the caller from receiving a refresh token. + idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID) + if err == nil { + tr.IDToken = idToken + } + + if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil { + // we need the family ID to identify the correct refresh token, if any + familyID := appMetadata.FamilyID + refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID) + if err == nil { + tr.RefreshToken = refreshToken + } + } + + account, err := m.readAccount(homeAccountID, aliases, realm) + if err == nil { + tr.Account = account + } + return tr, nil +} + +const scopeSeparator = " " + +// Write writes a token response to the cache and returns the account information the token is stored with. +func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + cachedAt := time.Now() + + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if err := m.writeRefreshToken(refreshToken); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + ) + + // Since we have a valid access token, cache it before moving on. 
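+		// Validate sanity-checks the token's timestamps; if the check fails,
+		// the token is simply not cached and Write continues without error.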
+ if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken); err != nil { + return account, err + } + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if err := m.writeIDToken(idToken); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + preferredUsername := idTokenJwt.UPN + if idTokenJwt.PreferredUsername != "" { + preferredUsername = idTokenJwt.PreferredUsername + } + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + preferredUsername, + ) + if err := m.writeAccount(account); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *Manager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) AccessToken { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. 
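+	// A cached token matches only when the home account, realm and client ID all
+	// line up, the cached environment is one of the authority's aliases, and the
+	// cached scopes are a superset of the requested scopes.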
+ for _, at := range m.contract.AccessTokens { + if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at + } + } + } + } + return AccessToken{} +} + +func (m *Manager) writeAccessToken(accessToken AccessToken) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + m.contract.AccessTokens[key] = accessToken + return nil +} + +func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, clientID string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshToken(rt, homeID, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshToken(rt, homeID, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // TODO(keegan): All the tests here pass, but Bogdan says this is + // more complicated. I'm opening an issue for this to have him + // review the tests and suggest tests that would break this so + // we can re-write against good tests. His comments as follow: + // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is. + // The algorithm is: + // If application is NOT part of the family, search by client_ID + // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
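+	// ("Family" refers to FOCI, the family of client IDs: applications in the
+	// same family may redeem each other's refresh tokens, which is why a family
+	// refresh token can satisfy a request from a different client ID.)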
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95 + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, matcher := range matchers { + for _, rt := range m.contract.RefreshTokens { + if matcher(rt) { + return rt, nil + } + } + } + + return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found") +} + +func matchFamilyRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string, clientID string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) error { + key := refreshToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.RefreshTokens[key] = refreshToken + return nil +} + +func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, idt := range m.contract.IDTokens { + if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID { + if checkAlias(idt.Environment, envAliases) { + return idt, nil + } + } + } + return IDToken{}, fmt.Errorf("token not found") +} + +func (m *Manager) writeIDToken(idToken IDToken) error { + key := idToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.IDTokens[key] = idToken + return nil +} + +func (m *Manager) AllAccounts() []shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + var accounts []shared.Account + for _, v := range m.contract.Accounts { + accounts = append(accounts, v) + } + + return accounts +} + +func (m *Manager) Account(homeAccountID string) shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, v := range m.contract.Accounts { + if v.HomeAccountID == homeAccountID { + return v + } + } + + return shared.Account{} +} + +func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key. + // We only use a map because the storage contract shared between all language implementations says use a map. + // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing + // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup + // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored + // is really low (say 2). Each hash is more expensive than the entire iteration. 
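+	// For example, an account key looks like "uid.utid-login.windows.net-contoso"
+	// (home account ID, environment, realm; see test_serialized_cache.json), so a
+	// lookup across N environment aliases would have to construct and probe N
+	// candidate keys.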
+ for _, acc := range m.contract.Accounts { + if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm { + return acc, nil + } + } + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *Manager) writeAccount(account shared.Account) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.Accounts[key] = account + return nil +} + +func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + return app, nil + } + } + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *Manager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// RemoveAccount removes all the associated ATs, RTs and IDTs from the cache associated with this account. +func (m *Manager) RemoveAccount(account shared.Account, clientID string) { + m.removeRefreshTokens(account.HomeAccountID, account.Environment, clientID) + m.removeAccessTokens(account.HomeAccountID, account.Environment) + m.removeIDTokens(account.HomeAccountID, account.Environment) + m.removeAccounts(account.HomeAccountID, account.Environment) +} + +func (m *Manager) removeRefreshTokens(homeID string, env string, clientID string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, rt := range m.contract.RefreshTokens { + // Check for RTs associated with the account. + if rt.HomeAccountID == homeID && rt.Environment == env { + // Do RT's app ownership check as a precaution, in case family apps + // and 3rd-party apps share same token cache, although they should not. + if rt.ClientID == clientID || rt.FamilyID != "" { + delete(m.contract.RefreshTokens, key) + } + } + } +} + +func (m *Manager) removeAccessTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, at := range m.contract.AccessTokens { + // Remove AT's associated with the account + if at.HomeAccountID == homeID && at.Environment == env { + // # To avoid the complexity of locating sibling family app's AT, we skip AT's app ownership check. + // It means ATs for other apps will also be removed, it is OK because: + // non-family apps are not supposed to share token cache to begin with; + // Even if it happens, we keep other app's RT already, so SSO still works. + delete(m.contract.AccessTokens, key) + } + } +} + +func (m *Manager) removeIDTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, idt := range m.contract.IDTokens { + // Remove ID tokens associated with the account. + if idt.HomeAccountID == homeID && idt.Environment == env { + delete(m.contract.IDTokens, key) + } + } +} + +func (m *Manager) removeAccounts(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, acc := range m.contract.Accounts { + // Remove the specified account. + if acc.HomeAccountID == homeID && acc.Environment == env { + delete(m.contract.Accounts, key) + } + } +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *Manager) update(cache *Contract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. 
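+// The serialized form is the cache schema shared across MSAL implementations
+// (see test_serialized_cache.json); unrecognized sections survive a round trip
+// via the contract's AdditionalFields support.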
+func (m *Manager) Marshal() ([]byte, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *Manager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json new file mode 100644 index 00000000..1d818192 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json @@ -0,0 +1,56 @@ +{ + "Account": { + "uid.utid-login.windows.net-contoso": { + "username": "John Doe", + "local_account_id": "object1234", + "realm": "contoso", + "environment": "login.windows.net", + "home_account_id": "uid.utid", + "authority_type": "MSSTS" + } + }, + "RefreshToken": { + "uid.utid-login.windows.net-refreshtoken-my_client_id--s2 s1 s3": { + "target": "s2 s1 s3", + "environment": "login.windows.net", + "credential_type": "RefreshToken", + "secret": "a refresh token", + "client_id": "my_client_id", + "home_account_id": "uid.utid" + } + }, + "AccessToken": { + "an-entry": { + "foo": "bar" + }, + "uid.utid-login.windows.net-accesstoken-my_client_id-contoso-s2 s1 s3": { + "environment": "login.windows.net", + "credential_type": "AccessToken", + "secret": "an access token", + "realm": "contoso", + "target": "s2 s1 s3", + "client_id": "my_client_id", + "cached_at": "1000", + "home_account_id": "uid.utid", + "extended_expires_on": "4600", + "expires_on": "4600" + } + }, + "IdToken": { + "uid.utid-login.windows.net-idtoken-my_client_id-contoso-": { + "realm": "contoso", + "environment": "login.windows.net", + "credential_type": "IdToken", + "secret": "header.eyJvaWQiOiAib2JqZWN0MTIzNCIsICJwcmVmZXJyZWRfdXNlcm5hbWUiOiAiSm9obiBEb2UiLCAic3ViIjogInN1YiJ9.signature", + "client_id": "my_client_id", + "home_account_id": "uid.utid" + } + }, + "unknownEntity": {"field1":"1","field2":"whats"}, + "AppMetadata": { + "AppMetadata-login.windows.net-my_client_id": { + "environment": "login.windows.net", + "client_id": "my_client_id" + } + } + } \ No newline at end of file diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go new file mode 100644 index 00000000..7b673e3f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// package exported contains internal types that are re-exported from a public package +package exported + +// AssertionRequestOptions has information required to generate a client assertion +type AssertionRequestOptions struct { + // ClientID identifies the application for which an assertion is requested. Used as the assertion's "iss" and "sub" claims. + ClientID string + + // TokenEndpoint is the intended token endpoint. Used as the assertion's "aud" claim. 
+	TokenEndpoint string
+}
+
+// TokenProviderParameters is the authentication parameters passed to token providers
+type TokenProviderParameters struct {
+	// Claims contains any additional claims requested for the token
+	Claims string
+	// CorrelationID of the authentication request
+	CorrelationID string
+	// Scopes requested for the token
+	Scopes []string
+	// TenantID identifies the tenant in which to authenticate
+	TenantID string
+}
+
+// TokenProviderResult is the authentication result returned by custom token providers
+type TokenProviderResult struct {
+	// AccessToken is the requested token
+	AccessToken string
+	// ExpiresInSeconds is the lifetime of the token in seconds
+	ExpiresInSeconds int
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md
new file mode 100644
index 00000000..09edb01b
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md
@@ -0,0 +1,140 @@
+# JSON Package Design
+Author: John Doak (jdoak@microsoft.com)
+
+## Why?
+
+This project needs a special type of marshal/unmarshal not directly supported
+by the encoding/json package.
+
+The need revolves around a few key wants/needs:
+- unmarshal and marshal structs representing JSON messages
+- fields in the message not in the struct must be maintained when unmarshalled
+- those same fields must be marshalled back when encoded again
+
+The initial version used map[string]interface{} to put in the keys that
+were known and then any other keys were put into a field called AdditionalFields.
+
+This has a few negatives:
+- Dual marshalling/unmarshalling is required
+- Adding a struct field requires manually adding a key by name to be encoded/decoded from the map (which is a loosely coupled construct), which can lead to bugs that aren't detected or have bad side effects
+- Tests can quickly become disconnected if those keys aren't put
+in tests as well. So you think you have support working, but you
+don't. Existing tests were found that didn't test the marshalling output.
+- There is no enforcement that if AdditionalFields is required on one struct, it should be on all containers
+that don't have custom marshal/unmarshal.
+
+This package aims to support our needs by providing custom Marshal()/Unmarshal() functions.
+
+This prevents all the negatives in the initial solution listed above. However, it does add its own negative:
+- Custom encoding/decoding via reflection is messy (as can be seen in encoding/json itself)
+
+Go proverb: Reflection is never clear
+Suggested reading: https://blog.golang.org/laws-of-reflection
+
+## Important design decisions
+
+- We don't want to understand all JSON decoding rules
+- We don't want to deal with all the quoting, commas, etc. on decode
+- Need support for json.Marshaler/Unmarshaler, so we can support types like time.Time
+- If a struct does not implement json.Unmarshaler, it must have AdditionalFields defined
+- We only support root level objects that are \*struct or struct
+
+To facilitate these goals, we will utilize the json.Encoder and json.Decoder.
+They provide streaming processing (efficient) and return errors on bad JSON.
+
+Support for json.Marshaler/Unmarshaler allows us to use non-basic types
+that must be specially encoded/decoded (like time.Time objects).
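+
+To make the intended behavior concrete, here is a minimal, hypothetical sketch
+of the round trip we want (`msg` and `roundTrip` are illustrative only;
+Marshal/Unmarshal are this package's functions):
+
+```go
+type msg struct {
+	Name string `json:"name"`
+	// Catch-all for JSON keys that have no corresponding struct field.
+	AdditionalFields map[string]interface{}
+}
+
+func roundTrip() error {
+	// "extra" has no field on msg, so it must be preserved, not dropped.
+	in := []byte(`{"name": "bob", "extra": true}`)
+	var m msg
+	if err := Unmarshal(in, &m); err != nil {
+		return err
+	}
+	// m.Name == "bob"; m.AdditionalFields["extra"] holds the raw JSON `true`.
+	_, err := Marshal(m) // writes "extra" back out alongside "name"
+	return err
+}
+```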
+
+We don't support types that can't custom unmarshal or have AdditionalFields
+in order to prevent future devs from forgetting that important field and
+generating bad return values.
+
+Support for root level objects of \*struct or struct simply acknowledges the
+fact that this is designed only for the purposes listed in the Introduction.
+Anything outside that (like encoding a lone number) should be done with the
+regular json package (as it will not have additional fields).
+
+We don't support a few things on json supported reference types and structs:
+- \*map: no need for pointers to maps
+- \*slice: no need for pointers to slices
+- any further pointers on struct after \*struct
+
+There should never be a need for this in Go.
+
+## Design
+
+## State Machines
+
+This uses state machine designs based upon the Rob Pike talk on
+lexers and parsers: https://www.youtube.com/watch?v=HxaD_trXwRE
+
+This is the most common pattern for state machines in Go and
+the model to follow closely when dealing with streaming
+processing of textual data.
+
+Our state machines are based on the type:
+```go
+type stateFn func() (stateFn, error)
+```
+
+The state machine itself is simply a struct that has methods that
+satisfy stateFn.
+
+Our state machines have a few standard calls:
+- run(): runs the state machine
+- start(): always the first stateFn to be called
+
+All state machines have the following logic:
+* run() is called
+* start() is called and returns the next stateFn or error
+* stateFn is called
+  - If the returned stateFn (next state) is non-nil, call it
+  - If the error is non-nil, run() returns the error
+  - If stateFn == nil and err == nil, run() returns nil
+
+## Supporting types
+
+Marshalling/Unmarshalling must support (within the top level struct):
+- struct
+- \*struct
+- []struct
+- []\*struct
+- []map[string]structContainer
+- [][]structContainer
+
+**Term note:** structContainer == a type that has a struct or \*struct inside it
+
+We specifically do not support []interface or map[string]interface
+where the interface value would hold some value with a struct in it.
+
+Those will still marshal/unmarshal, but without support for
+AdditionalFields.
+
+## Marshalling
+
+The marshalling design is based around a state machine design.
+
+The basic logic is as follows:
+
+* If the struct has a custom marshaller, call it and return
+* If the struct has field "AdditionalFields", it must be a map[string]interface{}
+* If the struct does not have "AdditionalFields", give an error
+* Get the struct tag detailing json names to go names, create a mapping
+* For each public field name
+  - Write the field name out
+  - If the field value is a struct, recursively call our state machine
+  - Otherwise, use the json.Encoder to write out the value
+
+## Unmarshalling
+
+The unmarshalling design is also based around a state machine design.
+The basic logic is as follows:
+
+* If the struct has a custom marshaller, call it
+* If the struct has field "AdditionalFields", it must be a map[string]interface{}
+* Get the struct tag detailing json names to go names, create a mapping
+* For each key found
+  - If the key exists in the struct,
+    - If the value is a basic type, extract the value into the struct field using the Decoder
+    - If the value is a struct type, recursively call the state machine
+  - If the key doesn't exist, add it to AdditionalFields (if it exists) using the Decoder
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
new file mode 100644
index 00000000..2238521f
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
@@ -0,0 +1,184 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package json provides functions for marshalling and unmarshalling types to JSON. These functions are meant to
+// be utilized inside of structs that implement json.Unmarshaler and json.Marshaler interfaces.
+// This package provides the additional functionality of writing fields that are not in the struct when marshalling
+// to a field called AdditionalFields if that field exists and is a map[string]interface{}.
+// When marshalling, if the struct has all the same prerequisites, it will use the keys in AdditionalFields as
+// extra fields. This package uses encoding/json underneath.
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+const addField = "AdditionalFields"
+const (
+	marshalJSON   = "MarshalJSON"
+	unmarshalJSON = "UnmarshalJSON"
+)
+
+var (
+	leftBrace  = []byte("{")[0]
+	rightBrace = []byte("}")[0]
+	comma      = []byte(",")[0]
+	leftParen  = []byte("[")[0]
+	rightParen = []byte("]")[0]
+)
+
+var mapStrInterType = reflect.TypeOf(map[string]interface{}{})
+
+// stateFn defines a state machine function. This will be used in all state
+// machines in this package.
+type stateFn func() (stateFn, error)
+
+// Marshal is used to marshal a type into its JSON representation. It
+// wraps the stdlib calls in order to marshal a struct or *struct so
+// that a field called "AdditionalFields" of type map[string]interface{}
+// with "-" used inside struct tag `json:"-"` can be marshalled as if
+// they were fields within the struct.
+func Marshal(i interface{}) ([]byte, error) {
+	buff := bytes.Buffer{}
+	enc := json.NewEncoder(&buff)
+	enc.SetEscapeHTML(false)
+	enc.SetIndent("", "")
+
+	v := reflect.ValueOf(i)
+	if v.Kind() != reflect.Ptr && v.CanAddr() {
+		v = v.Addr()
+	}
+	err := marshalStruct(v, &buff, enc)
+	if err != nil {
+		return nil, err
+	}
+	return buff.Bytes(), nil
+}
+
+// Unmarshal unmarshals a []byte representing JSON into i, which must be a *struct. In addition, if the struct has
+// a field called AdditionalFields of type map[string]interface{}, JSON data representing fields not in the struct
+// will be written as key/value pairs to AdditionalFields.
+func Unmarshal(b []byte, i interface{}) error {
+	if len(b) == 0 {
+		return nil
+	}
+
+	jdec := json.NewDecoder(bytes.NewBuffer(b))
+	jdec.UseNumber()
+	return unmarshalStruct(jdec, i)
+}
+
+// MarshalRaw marshals i into a json.RawMessage. If i cannot be marshalled,
+// this will panic. This is exposed to help test AdditionalField values
+// which are stored as json.RawMessage.
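+// For example, MarshalRaw(map[string]string{"a": "b"}) returns json.RawMessage(`{"a":"b"}`).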
+func MarshalRaw(i interface{}) json.RawMessage { + b, err := json.Marshal(i) + if err != nil { + panic(err) + } + return json.RawMessage(b) +} + +// isDelim simply tests to see if a json.Token is a delimeter. +func isDelim(got json.Token) bool { + switch got.(type) { + case json.Delim: + return true + } + return false +} + +// delimIs tests got to see if it is want. +func delimIs(got json.Token, want rune) bool { + switch v := got.(type) { + case json.Delim: + if v == json.Delim(want) { + return true + } + } + return false +} + +// hasMarshalJSON will determine if the value or a pointer to this value has +// the MarshalJSON method. +func hasMarshalJSON(v reflect.Value) bool { + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Marshaler) + return ok + } + + if v.Kind() == reflect.Ptr { + v = v.Elem() + } else { + if !v.CanAddr() { + return false + } + v = v.Addr() + } + + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Marshaler) + return ok + } + return false +} + +// callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value. +// This will panic if the method is not defined. +func callMarshalJSON(v reflect.Value) ([]byte, error) { + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + marsh := v.Interface().(json.Marshaler) + return marsh.MarshalJSON() + } + + if v.Kind() == reflect.Ptr { + v = v.Elem() + } else { + if v.CanAddr() { + v = v.Addr() + } + } + + if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { + marsh := v.Interface().(json.Marshaler) + return marsh.MarshalJSON() + } + + panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface())) +} + +// hasUnmarshalJSON will determine if the value or a pointer to this value has +// the UnmarshalJSON method. +func hasUnmarshalJSON(v reflect.Value) bool { + // You can't unmarshal on a non-pointer type. + if v.Kind() != reflect.Ptr { + if !v.CanAddr() { + return false + } + v = v.Addr() + } + + if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Unmarshaler) + return ok + } + + return false +} + +// hasOmitEmpty indicates if the field has instructed us to not output +// the field if omitempty is set on the tag. tag is the string +// returned by reflect.StructField.Tag().Get(). +func hasOmitEmpty(tag string) bool { + sl := strings.Split(tag, ",") + for _, str := range sl { + if str == "omitempty" { + return true + } + } + return false +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go new file mode 100644 index 00000000..cef442f2 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go @@ -0,0 +1,333 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// unmarshalMap unmarshal's a map. 
+func unmarshalMap(dec *json.Decoder, m reflect.Value) error { + if m.Kind() != reflect.Ptr || m.Elem().Kind() != reflect.Map { + panic("unmarshalMap called on non-*map value") + } + mapValueType := m.Elem().Type().Elem() + walk := mapWalk{dec: dec, m: m, valueType: mapValueType} + if err := walk.run(); err != nil { + return err + } + return nil +} + +type mapWalk struct { + dec *json.Decoder + key string + m reflect.Value + valueType reflect.Type +} + +// run runs our decoder state machine. +func (m *mapWalk) run() error { + var state = m.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (m *mapWalk) start() (stateFn, error) { + // maps can have custom unmarshaler's. + if hasUnmarshalJSON(m.m) { + err := m.dec.Decode(m.m.Interface()) + if err != nil { + return nil, err + } + return nil, nil + } + + // We only want to use this if the map value is: + // *struct/struct/map/slice + // otherwise use standard decode + t, _ := m.valueBaseType() + switch t.Kind() { + case reflect.Struct, reflect.Map, reflect.Slice: + delim, err := m.dec.Token() + if err != nil { + return nil, err + } + // This indicates the value was set to JSON null. + if delim == nil { + return nil, nil + } + if !delimIs(delim, '{') { + return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim) + } + return m.next, nil + case reflect.Ptr: + return nil, fmt.Errorf("do not support maps with values of '**type' or '*reference") + } + + // This is a basic map type, so just use Decode(). + if err := m.dec.Decode(m.m.Interface()); err != nil { + return nil, err + } + + return nil, nil +} + +func (m *mapWalk) next() (stateFn, error) { + if m.dec.More() { + key, err := m.dec.Token() + if err != nil { + return nil, err + } + m.key = key.(string) + return m.storeValue, nil + } + // No more entries, so remove final }. + _, err := m.dec.Token() + if err != nil { + return nil, err + } + return nil, nil +} + +func (m *mapWalk) storeValue() (stateFn, error) { + v := m.valueType + for { + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + continue + case reflect.Struct: + return m.storeStruct, nil + case reflect.Map: + return m.storeMap, nil + case reflect.Slice: + return m.storeSlice, nil + } + return nil, fmt.Errorf("bug: mapWalk.storeValue() called on unsupported type: %v", v.Kind()) + } +} + +func (m *mapWalk) storeStruct() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalStruct(m.dec, v.Interface()); err != nil { + return nil, err + } + + if m.valueType.Kind() == reflect.Ptr { + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + return m.next, nil + } + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +func (m *mapWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(m.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(m.dec, ptr); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + + return m.next, nil +} + +func (m *mapWalk) storeSlice() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalSlice(m.dec, v); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... 
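+// The returned bool reports whether the original type was a pointer.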
+func (m *mapWalk) valueBaseType() (reflect.Type, bool) { + ptr := false + v := m.valueType + if v.Kind() == reflect.Ptr { + ptr = true + v = v.Elem() + } + return v, ptr +} + +// unmarshalSlice unmarshal's the next value, which must be a slice, into +// ptrSlice, which must be a pointer to a slice. newValue() can be use to +// create the slice. +func unmarshalSlice(dec *json.Decoder, ptrSlice reflect.Value) error { + if ptrSlice.Kind() != reflect.Ptr || ptrSlice.Elem().Kind() != reflect.Slice { + panic("unmarshalSlice called on non-*[]slice value") + } + sliceValueType := ptrSlice.Elem().Type().Elem() + walk := sliceWalk{ + dec: dec, + s: ptrSlice, + valueType: sliceValueType, + } + if err := walk.run(); err != nil { + return err + } + + return nil +} + +type sliceWalk struct { + dec *json.Decoder + s reflect.Value // *[]slice + valueType reflect.Type +} + +// run runs our decoder state machine. +func (s *sliceWalk) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceWalk) start() (stateFn, error) { + // slices can have custom unmarshaler's. + if hasUnmarshalJSON(s.s) { + err := s.dec.Decode(s.s.Interface()) + if err != nil { + return nil, err + } + return nil, nil + } + + // We only want to use this if the slice value is: + // []*struct/[]struct/[]map/[]slice + // otherwise use standard decode + t := s.valueBaseType() + + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("cannot unmarshal into a ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + delim, err := s.dec.Token() + if err != nil { + return nil, err + } + // This indicates the value was set to nil. + if delim == nil { + return nil, nil + } + if !delimIs(delim, '[') { + return nil, fmt.Errorf("Unmarshal expected opening [, received %v", delim) + } + return s.next, nil + } + + if err := s.dec.Decode(s.s.Interface()); err != nil { + return nil, err + } + return nil, nil +} + +func (s *sliceWalk) next() (stateFn, error) { + if s.dec.More() { + return s.storeValue, nil + } + // Nothing left in the slice, remove closing ] + _, err := s.dec.Token() + return nil, err +} + +func (s *sliceWalk) storeValue() (stateFn, error) { + t := s.valueBaseType() + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("do not support 'pointer to pointer' or 'pointer to reference' types") + case reflect.Struct: + return s.storeStruct, nil + case reflect.Map: + return s.storeMap, nil + case reflect.Slice: + return s.storeSlice, nil + } + return nil, fmt.Errorf("bug: sliceWalk.storeValue() called on unsupported type: %v", t.Kind()) +} + +func (s *sliceWalk) storeStruct() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalStruct(s.dec, v.Interface()); err != nil { + return nil, err + } + + if s.valueType.Kind() == reflect.Ptr { + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + return s.next, nil + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + return s.next, nil +} + +func (s *sliceWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(s.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + + if err := unmarshalMap(s.dec, ptr); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + + return s.next, nil +} + +func (s *sliceWalk) storeSlice() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalSlice(s.dec, v); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + + 
return s.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... +func (s *sliceWalk) valueBaseType() reflect.Type { + v := s.valueType + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + return v +} + +// newValue() returns a new *type that represents type passed. +func newValue(valueType reflect.Type) reflect.Value { + if valueType.Kind() == reflect.Ptr { + return reflect.New(valueType.Elem()) + } + return reflect.New(valueType) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go new file mode 100644 index 00000000..df5dc6e1 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go @@ -0,0 +1,346 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "unicode" +) + +// marshalStruct takes in i, which must be a *struct or struct and marshals its content +// as JSON into buff (sometimes with writes to buff directly, sometimes via enc). +// This call is recursive for all fields of *struct or struct type. +func marshalStruct(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + // We only care about custom Marshalling a struct. + if v.Kind() != reflect.Struct { + return fmt.Errorf("bug: marshal() received a non *struct or struct, received type %T", v.Interface()) + } + + if hasMarshalJSON(v) { + b, err := callMarshalJSON(v) + if err != nil { + return err + } + buff.Write(b) + return nil + } + + t := v.Type() + + // If it has an AdditionalFields field make sure its the right type. + f := v.FieldByName(addField) + if f.Kind() != reflect.Invalid { + if f.Kind() != reflect.Map { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + if !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + } + + translator, err := findFields(v) + if err != nil { + return err + } + + buff.WriteByte(leftBrace) + for x := 0; x < v.NumField(); x++ { + field := v.Field(x) + + // We don't access private fields. + if unicode.IsLower(rune(t.Field(x).Name[0])) { + continue + } + + if t.Field(x).Name == addField { + if v.Field(x).Len() > 0 { + if err := writeAddFields(field.Interface(), buff, enc); err != nil { + return err + } + buff.WriteByte(comma) + } + continue + } + + // If they have omitempty set, we don't write out the field if + // it is the zero value. + if hasOmitEmpty(t.Field(x).Tag.Get("json")) { + if v.Field(x).IsZero() { + continue + } + } + + // Write out the field name part. + jsonName := translator.jsonName(t.Field(x).Name) + buff.WriteString(fmt.Sprintf("%q:", jsonName)) + + if field.Kind() == reflect.Ptr { + field = field.Elem() + } + + if err := marshalStructField(field, buff, enc); err != nil { + return err + } + } + + buff.Truncate(buff.Len() - 1) // Remove final comma + buff.WriteByte(rightBrace) + + return nil +} + +func marshalStructField(field reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + // Determine if we need a trailing comma. + defer buff.WriteByte(comma) + + switch field.Kind() { + // If it was a *struct or struct, we need to recursively all marshal(). 
+ case reflect.Struct: + if field.CanAddr() { + field = field.Addr() + } + return marshalStruct(field, buff, enc) + case reflect.Map: + return marshalMap(field, buff, enc) + case reflect.Slice: + return marshalSlice(field, buff, enc) + } + + // It is just a basic type, so encode it. + if err := enc.Encode(field.Interface()); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + return nil +} + +func marshalMap(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Map { + return fmt.Errorf("bug: marshalMap() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftBrace) + buff.WriteByte(rightBrace) + return nil + } + encoder := mapEncode{m: v, buff: buff, enc: enc} + return encoder.run() +} + +type mapEncode struct { + m reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. +func (m *mapEncode) run() error { + var state = m.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (m *mapEncode) start() (stateFn, error) { + if hasMarshalJSON(m.m) { + b, err := callMarshalJSON(m.m) + if err != nil { + return nil, err + } + m.buff.Write(b) + return nil, nil + } + + valueBaseType := m.m.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + m.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return m.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. + if err := m.enc.Encode(m.m.Interface()); err != nil { + return nil, err + } + m.buff.Truncate(m.buff.Len() - 1) // Remove Encode() added \n + return nil, nil +} + +func (m *mapEncode) encode() (stateFn, error) { + m.buff.WriteByte(leftBrace) + + iter := m.m.MapRange() + for iter.Next() { + // Write the key. + k := iter.Key() + m.buff.WriteString(fmt.Sprintf("%q:", k.String())) + + v := iter.Value() + switch m.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, m.buff, m.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", m.valueBaseType.Kind())) + } + m.buff.WriteByte(comma) + } + m.buff.Truncate(m.buff.Len() - 1) // Remove final comma + m.buff.WriteByte(rightBrace) + + return nil, nil +} + +func marshalSlice(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Slice { + return fmt.Errorf("bug: marshalSlice() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftParen) + buff.WriteByte(rightParen) + return nil + } + encoder := sliceEncode{s: v, buff: buff, enc: enc} + return encoder.run() +} + +type sliceEncode struct { + s reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. 
+func (s *sliceEncode) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceEncode) start() (stateFn, error) { + if hasMarshalJSON(s.s) { + b, err := callMarshalJSON(s.s) + if err != nil { + return nil, err + } + s.buff.Write(b) + return nil, nil + } + + valueBaseType := s.s.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + s.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return s.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. + if err := s.enc.Encode(s.s.Interface()); err != nil { + return nil, err + } + s.buff.Truncate(s.buff.Len() - 1) // Remove Encode added \n + + return nil, nil +} + +func (s *sliceEncode) encode() (stateFn, error) { + s.buff.WriteByte(leftParen) + for i := 0; i < s.s.Len(); i++ { + v := s.s.Index(i) + switch s.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, s.buff, s.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", s.valueBaseType.Kind())) + } + s.buff.WriteByte(comma) + } + s.buff.Truncate(s.buff.Len() - 1) // Remove final comma + s.buff.WriteByte(rightParen) + return nil, nil +} + +// writeAddFields writes the AdditionalFields struct field out to JSON as field +// values. i must be a map[string]interface{} or this will panic. +func writeAddFields(i interface{}, buff *bytes.Buffer, enc *json.Encoder) error { + m := i.(map[string]interface{}) + + x := 0 + for k, v := range m { + buff.WriteString(fmt.Sprintf("%q:", k)) + if err := enc.Encode(v); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + if x+1 != len(m) { + buff.WriteByte(comma) + } + x++ + } + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go new file mode 100644 index 00000000..07751544 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go @@ -0,0 +1,290 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +func unmarshalStruct(jdec *json.Decoder, i interface{}) error { + v := reflect.ValueOf(i) + if v.Kind() != reflect.Ptr { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + + if hasUnmarshalJSON(v) { + // Indicates that this type has a custom Unmarshaler. 
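+		// Hand the whole object to that implementation, which then owns any
+		// AdditionalFields handling for this type.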
+ return jdec.Decode(v.Addr().Interface()) + } + + f := v.FieldByName(addField) + if f.Kind() == reflect.Invalid { + return fmt.Errorf("Unmarshal(%T) only supports structs that have the field AdditionalFields or implements json.Unmarshaler", i) + } + + if f.Kind() != reflect.Map || !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", i) + } + + dec := newDecoder(jdec, v) + return dec.run() +} + +type decoder struct { + dec *json.Decoder + value reflect.Value // This will be a reflect.Struct + translator translateFields + key string +} + +func newDecoder(dec *json.Decoder, value reflect.Value) *decoder { + return &decoder{value: value, dec: dec} +} + +// run runs our decoder state machine. +func (d *decoder) run() error { + var state = d.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +// start looks for our opening delimeter '{' and then transitions to looping through our fields. +func (d *decoder) start() (stateFn, error) { + var err error + d.translator, err = findFields(d.value) + if err != nil { + return nil, err + } + + delim, err := d.dec.Token() + if err != nil { + return nil, err + } + if !delimIs(delim, '{') { + return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim) + } + + return d.next, nil +} + +// next gets the next struct field name from the raw json or stops the machine if we get our closing }. +func (d *decoder) next() (stateFn, error) { + if !d.dec.More() { + // Remove the closing }. + if _, err := d.dec.Token(); err != nil { + return nil, err + } + return nil, nil + } + + key, err := d.dec.Token() + if err != nil { + return nil, err + } + + d.key = key.(string) + return d.storeValue, nil +} + +// storeValue takes the next value and stores it our struct. If the field can't be found +// in the struct, it pushes the operation to storeAdditional(). +func (d *decoder) storeValue() (stateFn, error) { + goName := d.translator.goName(d.key) + if goName == "" { + goName = d.key + } + + // We don't have the field in the struct, so it goes in AdditionalFields. + f := d.value.FieldByName(goName) + if f.Kind() == reflect.Invalid { + return d.storeAdditional, nil + } + + // Indicates that this type has a custom Unmarshaler. + if hasUnmarshalJSON(f) { + err := d.dec.Decode(f.Addr().Interface()) + if err != nil { + return nil, err + } + return d.next, nil + } + + t, isPtr, err := fieldBaseType(d.value, goName) + if err != nil { + return nil, fmt.Errorf("type(%s) had field(%s) %w", d.value.Type().Name(), goName, err) + } + + switch t.Kind() { + // We need to recursively call ourselves on any *struct or struct. + case reflect.Struct: + if isPtr { + if f.IsNil() { + f.Set(reflect.New(t)) + } + } else { + f = f.Addr() + } + if err := unmarshalStruct(d.dec, f.Interface()); err != nil { + return nil, err + } + return d.next, nil + case reflect.Map: + v := reflect.MakeMap(f.Type()) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + case reflect.Slice: + v := reflect.MakeSlice(f.Type(), 0, 0) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalSlice(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + } + + if !isPtr { + f = f.Addr() + } + + // For values that are pointers, we need them to be non-nil in order + // to decode into them. 
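+	// (Decoding into a nil pointer would fail, so allocate the value first.)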
+ if f.IsNil() { + f.Set(reflect.New(t)) + } + + if err := d.dec.Decode(f.Interface()); err != nil { + return nil, err + } + + return d.next, nil +} + +// storeAdditional pushes the key/value into our .AdditionalFields map. +func (d *decoder) storeAdditional() (stateFn, error) { + rw := json.RawMessage{} + if err := d.dec.Decode(&rw); err != nil { + return nil, err + } + field := d.value.FieldByName(addField) + if field.IsNil() { + field.Set(reflect.MakeMap(field.Type())) + } + field.SetMapIndex(reflect.ValueOf(d.key), reflect.ValueOf(rw)) + return d.next, nil +} + +func fieldBaseType(v reflect.Value, fieldName string) (t reflect.Type, isPtr bool, err error) { + sf, ok := v.Type().FieldByName(fieldName) + if !ok { + return nil, false, fmt.Errorf("bug: fieldBaseType() lookup of field(%s) on type(%s): do not have field", fieldName, v.Type().Name()) + } + t = sf.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + isPtr = true + } + if t.Kind() == reflect.Ptr { + return nil, isPtr, fmt.Errorf("received pointer to pointer type, not supported") + } + return t, isPtr, nil +} + +type translateField struct { + jsonName string + goName string +} + +// translateFields is a list of translateFields with a handy lookup method. +type translateFields []translateField + +// goName loops through a list of fields looking for one contaning the jsonName and +// returning the goName. If not found, returns the empty string. +// Note: not a map because at this size slices are faster even in tight loops. +func (t translateFields) goName(jsonName string) string { + for _, entry := range t { + if entry.jsonName == jsonName { + return entry.goName + } + } + return "" +} + +// jsonName loops through a list of fields looking for one contaning the goName and +// returning the jsonName. If not found, returns the empty string. +// Note: not a map because at this size slices are faster even in tight loops. +func (t translateFields) jsonName(goName string) string { + for _, entry := range t { + if entry.goName == goName { + return entry.jsonName + } + } + return "" +} + +var umarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + +// findFields parses a struct and writes the field tags for lookup. It will return an error +// if any field has a type of *struct or struct that does not implement json.Marshaler. +func findFields(v reflect.Value) (translateFields, error) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("findFields received a %s type, expected *struct or struct", v.Type().Name()) + } + tfs := make([]translateField, 0, v.NumField()) + for i := 0; i < v.NumField(); i++ { + tf := translateField{ + goName: v.Type().Field(i).Name, + jsonName: parseTag(v.Type().Field(i).Tag.Get("json")), + } + switch tf.jsonName { + case "", "-": + tf.jsonName = tf.goName + } + tfs = append(tfs, tf) + + f := v.Field(i) + if f.Kind() == reflect.Ptr { + f = f.Elem() + } + if f.Kind() == reflect.Struct { + if f.Type().Implements(umarshalerType) { + return nil, fmt.Errorf("struct type %q which has field %q which "+ + "doesn't implement json.Unmarshaler", v.Type().Name(), v.Type().Field(i).Name) + } + } + } + return tfs, nil +} + +// parseTag just returns the first entry in the tag. tag is the string +// returned by reflect.StructField.Tag().Get(). 
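+// For example, parseTag("name,omitempty") returns "name".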
+func parseTag(tag string) string { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx] + } + return tag +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go new file mode 100644 index 00000000..a1c99621 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package time provides for custom types to translate time from JSON and other formats +// into time.Time objects. +package time + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Unix provides a type that can marshal and unmarshal a string representation +// of the unix epoch into a time.Time object. +type Unix struct { + T time.Time +} + +// MarshalJSON implements encoding/json.MarshalJSON(). +func (u Unix) MarshalJSON() ([]byte, error) { + if u.T.IsZero() { + return []byte(""), nil + } + return []byte(fmt.Sprintf("%q", strconv.FormatInt(u.T.Unix(), 10))), nil +} + +// UnmarshalJSON implements encoding/json.UnmarshalJSON(). +func (u *Unix) UnmarshalJSON(b []byte) error { + i, err := strconv.Atoi(strings.Trim(string(b), `"`)) + if err != nil { + return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err) + } + u.T = time.Unix(int64(i), 0) + return nil +} + +// DurationTime provides a type that can marshal and unmarshal a string representation +// of a duration from now into a time.Time object. +// Note: I'm not sure this is the best way to do this. What happens is we get a field +// called "expires_in" that represents the seconds from now that this expires. We +// turn that into a time we call .ExpiresOn. But maybe we should be recording +// when the token was received at .TokenRecieved and .ExpiresIn should remain as a duration. +// Then we could have a method called ExpiresOn(). Honestly, the whole thing is +// bad because the server doesn't return a concrete time. I think this is +// cleaner, but its not great either. +type DurationTime struct { + T time.Time +} + +// MarshalJSON implements encoding/json.MarshalJSON(). +func (d DurationTime) MarshalJSON() ([]byte, error) { + if d.T.IsZero() { + return []byte(""), nil + } + + dt := time.Until(d.T) + return []byte(fmt.Sprintf("%d", int64(dt*time.Second))), nil +} + +// UnmarshalJSON implements encoding/json.UnmarshalJSON(). +func (d *DurationTime) UnmarshalJSON(b []byte) error { + i, err := strconv.Atoi(strings.Trim(string(b), `"`)) + if err != nil { + return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err) + } + d.T = time.Now().Add(time.Duration(i) * time.Second) + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go new file mode 100644 index 00000000..04236ff3 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -0,0 +1,177 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package local contains a local HTTP server used with interactive authentication. 
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
new file mode 100644
index 00000000..04236ff3
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
@@ -0,0 +1,177 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package local contains a local HTTP server used with interactive authentication.
+package local
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var okPage = []byte(`<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8" />
+    <title>Authentication Complete</title>
+</head>
+<body>
+    <p>Authentication complete. You can return to the application. Feel free to close this browser tab.</p>
+</body>
+</html>
+`)
+
+const failPage = `<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8" />
+    <title>Authentication Failed</title>
+</head>
+<body>
+    <p>Authentication failed. You can return to the application. Feel free to close this browser tab.</p>
+    <p>Error details: error %s error_description: %s</p>
+</body>
+</html>
+`
+
+// Result is the result from the redirect.
+type Result struct {
+	// Code is the code sent by the authority server.
+	Code string
+	// Err is set if there was an error.
+	Err error
+}
+
+// Server is an HTTP server.
+type Server struct {
+	// Addr is the address the server is listening on.
+	Addr     string
+	resultCh chan Result
+	s        *http.Server
+	reqState string
+}
+
+// New creates a local HTTP server and starts it.
+func New(reqState string, port int) (*Server, error) {
+	var l net.Listener
+	var err error
+	var portStr string
+	if port > 0 {
+		// use port provided by caller
+		l, err = net.Listen("tcp", fmt.Sprintf("localhost:%d", port))
+		portStr = strconv.FormatInt(int64(port), 10)
+	} else {
+		// find a free port
+		for i := 0; i < 10; i++ {
+			l, err = net.Listen("tcp", "localhost:0")
+			if err != nil {
+				continue
+			}
+			addr := l.Addr().String()
+			portStr = addr[strings.LastIndex(addr, ":")+1:]
+			break
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	serv := &Server{
+		Addr:     fmt.Sprintf("http://localhost:%s", portStr),
+		s:        &http.Server{Addr: "localhost:0", ReadHeaderTimeout: time.Second},
+		reqState: reqState,
+		resultCh: make(chan Result, 1),
+	}
+	serv.s.Handler = http.HandlerFunc(serv.handler)
+
+	if err := serv.start(l); err != nil {
+		return nil, err
+	}
+
+	return serv, nil
+}
+
+func (s *Server) start(l net.Listener) error {
+	go func() {
+		err := s.s.Serve(l)
+		if err != nil {
+			select {
+			case s.resultCh <- Result{Err: err}:
+			default:
+			}
+		}
+	}()
+
+	return nil
+}
+
+// Result gets the result of the redirect operation. Once a single result is returned, the server
+// is shut down. ctx deadline will be honored.
+func (s *Server) Result(ctx context.Context) Result {
+	select {
+	case <-ctx.Done():
+		return Result{Err: ctx.Err()}
+	case r := <-s.resultCh:
+		return r
+	}
+}
+
+// Shutdown shuts down the server.
+func (s *Server) Shutdown() {
+	// Note: You might get clever and think you can do this in handler() as a defer, you can't.
+	_ = s.s.Shutdown(context.Background())
+}
+
+func (s *Server) putResult(r Result) {
+	select {
+	case s.resultCh <- r:
+	default:
+	}
+}
+
+func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
+	q := r.URL.Query()
+
+	headerErr := q.Get("error")
+	if headerErr != "" {
+		desc := q.Get("error_description")
+		// Note: It is a little weird we handle some errors by not going to the failPage. If they all should,
+		// change this to s.error() and make s.error() write the failPage instead of an error code.
+		_, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc)))
+		s.putResult(Result{Err: fmt.Errorf(desc)})
+		return
+	}
+
+	respState := q.Get("state")
+	switch respState {
+	case s.reqState:
+	case "":
+		s.error(w, http.StatusInternalServerError, "server didn't send OAuth state")
+		return
+	default:
+		s.error(w, http.StatusInternalServerError, "mismatched OAuth state, req(%s), resp(%s)", s.reqState, respState)
+		return
+	}
+
+	code := q.Get("code")
+	if code == "" {
+		s.error(w, http.StatusInternalServerError, "authorization code missing in query string")
+		return
+	}
+
+	_, _ = w.Write(okPage)
+	s.putResult(Result{Code: code})
+}
+
+func (s *Server) error(w http.ResponseWriter, code int, str string, i ...interface{}) {
+	err := fmt.Errorf(str, i...)
+	http.Error(w, err.Error(), code)
+	s.putResult(Result{Err: err})
+}
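A caller uses this server roughly as follows: create it with the expected OAuth state, point the authorization request's redirect_uri at Addr, then block on Result until the browser comes back. Since the package is internal, here is a self-contained sketch of the same state-validated redirect-catcher pattern (every value is a placeholder, and the second goroutine stands in for the authority's redirect):

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	const wantState = "random-state-123" // generated per request in real use
	result := make(chan string, 1)

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		q := r.URL.Query()
		if q.Get("state") != wantState { // the state must round-trip unchanged
			http.Error(w, "mismatched OAuth state", http.StatusInternalServerError)
			return
		}
		fmt.Fprintln(w, "Authentication complete. You can close this tab.")
		select {
		case result <- q.Get("code"):
		default: // a result was already delivered; ignore duplicates
		}
	})
	srv := &http.Server{Handler: mux, ReadHeaderTimeout: time.Second}

	l, err := net.Listen("tcp", "localhost:0") // pick a free port
	if err != nil {
		panic(err)
	}
	go srv.Serve(l)

	// Stand-in for the authority redirecting the browser back with a code.
	go http.Get("http://" + l.Addr().String() + "/?state=" + wantState + "&code=fake-code")

	fmt.Println("received code:", <-result) // received code: fake-code
	_ = srv.Shutdown(context.Background())
}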
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
new file mode 100644
index 00000000..ebd86e2b
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
@@ -0,0 +1,353 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package oauth
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported"
+	internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs"
+	"github.com/google/uuid"
+)
+
+// ResolveEndpointer contains the methods for resolving authority endpoints.
+type ResolveEndpointer interface {
+	ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error)
+}
+
+// AccessTokens contains the methods for fetching tokens from different sources.
+type AccessTokens interface {
+	DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (accesstokens.DeviceCodeResult, error)
+	FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (accesstokens.TokenResponse, error)
+	FromAuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error)
+	FromRefreshToken(ctx context.Context, appType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken string) (accesstokens.TokenResponse, error)
+	FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (accesstokens.TokenResponse, error)
+	FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (accesstokens.TokenResponse, error)
+	FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (accesstokens.TokenResponse, error)
+	FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (accesstokens.TokenResponse, error)
+	FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult accesstokens.DeviceCodeResult) (accesstokens.TokenResponse, error)
+	FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (accesstokens.TokenResponse, error)
+}
+
+// FetchAuthority will be implemented by authority.Authority.
+type FetchAuthority interface {
+	UserRealm(context.Context, authority.AuthParams) (authority.UserRealm, error)
+	AADInstanceDiscovery(context.Context, authority.Info) (authority.InstanceDiscoveryResponse, error)
+}
+
+// FetchWSTrust contains the methods for interacting with WSTrust endpoints.
+type FetchWSTrust interface {
+	Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error)
+	SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (wstrust.SamlTokenInfo, error)
+}
+
+// Client provides tokens for various types of token requests.
+type Client struct {
+	Resolver     ResolveEndpointer
+	AccessTokens AccessTokens
+	Authority    FetchAuthority
+	WSTrust      FetchWSTrust
+}
+
+// New is the constructor for Client.
+func New(httpClient ops.HTTPClient) *Client {
+	r := ops.New(httpClient)
+	return &Client{
+		Resolver:     newAuthorityEndpoint(r),
+		AccessTokens: r.AccessTokens(),
+		Authority:    r.Authority(),
+		WSTrust:      r.WSTrust(),
+	}
+}
+
+// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance.
+func (t *Client) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) {
+	return t.Resolver.ResolveEndpoints(ctx, authorityInfo, userPrincipalName)
+}
+
+// AADInstanceDiscovery attempts to discover a tenant endpoint (used in OIDC auth with an authorization endpoint).
+// This is done by AAD which allows for aliasing of tenants (windows.sts.net is the same as login.windows.com).
+func (t *Client) AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) {
+	return t.Authority.AADInstanceDiscovery(ctx, authorityInfo)
+}
+
+// AuthCode returns a token based on an authorization code.
+func (t *Client) AuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) {
+	if err := scopeError(req.AuthParams); err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+	if err := t.resolveEndpoint(ctx, &req.AuthParams, ""); err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+
+	tResp, err := t.AccessTokens.FromAuthCode(ctx, req)
+	if err != nil {
+		return accesstokens.TokenResponse{}, fmt.Errorf("could not retrieve token from auth code: %w", err)
+	}
+	return tResp, nil
+}
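The TokenProvider branch in Credential below lets an application supply tokens itself: the callback returns an access token plus a lifetime in seconds, which is then converted into an absolute expiry. A sketch of the shape of such a callback; the parameter and result types are stand-ins for the internal exported.TokenProviderParameters and exported.TokenProviderResult:

package main

import (
	"context"
	"fmt"
	"time"
)

// Stand-ins for exported.TokenProviderParameters / exported.TokenProviderResult.
type tokenParams struct {
	Scopes        []string
	TenantID      string
	CorrelationID string
}
type tokenResult struct {
	AccessToken      string
	ExpiresInSeconds int
}

func main() {
	// An application-supplied provider, e.g. one backed by a custom STS.
	provider := func(ctx context.Context, p tokenParams) (tokenResult, error) {
		return tokenResult{AccessToken: "app-issued-token", ExpiresInSeconds: 3600}, nil
	}

	now := time.Now()
	tr, err := provider(context.Background(), tokenParams{Scopes: []string{"api://app/.default"}})
	if err != nil {
		panic(err)
	}
	// Mirrors Credential: a seconds-from-now lifetime becomes an absolute expiry.
	expiresOn := now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second)
	fmt.Println(tr.AccessToken, "expires at", expiresOn.Round(time.Second))
}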
+// Credential acquires a token from the authority using a client credentials grant.
+func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) {
+	if cred.TokenProvider != nil {
+		now := time.Now()
+		scopes := make([]string, len(authParams.Scopes))
+		copy(scopes, authParams.Scopes)
+		params := exported.TokenProviderParameters{
+			Claims:        authParams.Claims,
+			CorrelationID: uuid.New().String(),
+			Scopes:        scopes,
+			TenantID:      authParams.AuthorityInfo.Tenant,
+		}
+		tr, err := cred.TokenProvider(ctx, params)
+		if err != nil {
+			if len(scopes) == 0 {
+				err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err)
+				return accesstokens.TokenResponse{}, err
+			}
+			return accesstokens.TokenResponse{}, err
+		}
+		return accesstokens.TokenResponse{
+			AccessToken: tr.AccessToken,
+			ExpiresOn: internalTime.DurationTime{
+				T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second),
+			},
+			GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes},
+		}, nil
+	}
+
+	if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+
+	if cred.Secret != "" {
+		return t.AccessTokens.FromClientSecret(ctx, authParams, cred.Secret)
+	}
+	jwt, err := cred.JWT(ctx, authParams)
+	if err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+	return t.AccessTokens.FromAssertion(ctx, authParams, jwt)
+}
+
+// OnBehalfOf acquires a token for the user represented by authParams.UserAssertion, using the given client credential (secret or certificate).
+func (t *Client) OnBehalfOf(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) {
+	if err := scopeError(authParams); err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+	if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+
+	if cred.Secret != "" {
+		return t.AccessTokens.FromUserAssertionClientSecret(ctx, authParams, authParams.UserAssertion, cred.Secret)
+	}
+	jwt, err := cred.JWT(ctx, authParams)
+	if err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+	tr, err := t.AccessTokens.FromUserAssertionClientCertificate(ctx, authParams, authParams.UserAssertion, jwt)
+	if err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+	return tr, nil
+}
+
+func (t *Client) Refresh(ctx context.Context, reqType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken accesstokens.RefreshToken) (accesstokens.TokenResponse, error) {
+	if err := scopeError(authParams); err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+	if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+
+	tr, err := t.AccessTokens.FromRefreshToken(ctx, reqType, authParams, cc, refreshToken.Secret)
+	if err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+	return tr, nil
+}
+
+// UsernamePassword retrieves a token where a username and password are used. However, if this is
+// a user realm of "Federated", this uses SAML tokens. If "Managed", uses normal username/password.
+func (t *Client) UsernamePassword(ctx context.Context, authParams authority.AuthParams) (accesstokens.TokenResponse, error) { + if err := scopeError(authParams); err != nil { + return accesstokens.TokenResponse{}, err + } + + if authParams.AuthorityInfo.AuthorityType == authority.ADFS { + if err := t.resolveEndpoint(ctx, &authParams, authParams.Username); err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + userRealm, err := t.Authority.UserRealm(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("problem getting user realm from authority: %w", err) + } + + switch userRealm.AccountType { + case authority.Federated: + mexDoc, err := t.WSTrust.Mex(ctx, userRealm.FederationMetadataURL) + if err != nil { + err = fmt.Errorf("problem getting mex doc from federated url(%s): %w", userRealm.FederationMetadataURL, err) + return accesstokens.TokenResponse{}, err + } + + saml, err := t.WSTrust.SAMLTokenInfo(ctx, authParams, userRealm.CloudAudienceURN, mexDoc.UsernamePasswordEndpoint) + if err != nil { + err = fmt.Errorf("problem getting SAML token info: %w", err) + return accesstokens.TokenResponse{}, err + } + tr, err := t.AccessTokens.FromSamlGrant(ctx, authParams, saml) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return tr, nil + case authority.Managed: + if len(authParams.Scopes) == 0 { + err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err) + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + return accesstokens.TokenResponse{}, errors.New("unknown account type") +} + +// DeviceCode is the result of a call to Token.DeviceCode(). +type DeviceCode struct { + // Result is the device code result from the first call in the device code flow. This allows + // the caller to retrieve the displayed code that is used to authorize on the second device. + Result accesstokens.DeviceCodeResult + authParams authority.AuthParams + + accessTokens AccessTokens +} + +// Token returns a token AFTER the user uses the user code on the second device. This will block +// until either: (1) the code is input by the user and the service releases a token, (2) the token +// expires, (3) the Context passed to .DeviceCode() is cancelled or expires, (4) some other service +// error occurs. +func (d DeviceCode) Token(ctx context.Context) (accesstokens.TokenResponse, error) { + if d.accessTokens == nil { + return accesstokens.TokenResponse{}, fmt.Errorf("DeviceCode was either created outside its package or the creating method had an error. 
DeviceCode is not valid") + } + + var cancel context.CancelFunc + if deadline, ok := ctx.Deadline(); !ok || d.Result.ExpiresOn.Before(deadline) { + ctx, cancel = context.WithDeadline(ctx, d.Result.ExpiresOn) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + + var interval = 50 * time.Millisecond + timer := time.NewTimer(interval) + defer timer.Stop() + + for { + timer.Reset(interval) + select { + case <-ctx.Done(): + return accesstokens.TokenResponse{}, ctx.Err() + case <-timer.C: + interval += interval * 2 + if interval > 5*time.Second { + interval = 5 * time.Second + } + } + + token, err := d.accessTokens.FromDeviceCodeResult(ctx, d.authParams, d.Result) + if err != nil && isWaitDeviceCodeErr(err) { + continue + } + return token, err // This handles if it was a non-wait error or success + } +} + +type deviceCodeError struct { + Error string `json:"error"` +} + +func isWaitDeviceCodeErr(err error) bool { + var c errors.CallErr + if !errors.As(err, &c) { + return false + } + if c.Resp.StatusCode != 400 { + return false + } + var dCErr deviceCodeError + defer c.Resp.Body.Close() + body, err := io.ReadAll(c.Resp.Body) + if err != nil { + return false + } + err = json.Unmarshal(body, &dCErr) + if err != nil { + return false + } + if dCErr.Error == "authorization_pending" || dCErr.Error == "slow_down" { + return true + } + return false +} + +// DeviceCode returns a DeviceCode object that can be used to get the code that must be entered on the second +// device and optionally the token once the code has been entered on the second device. +func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams) (DeviceCode, error) { + if err := scopeError(authParams); err != nil { + return DeviceCode{}, err + } + + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return DeviceCode{}, err + } + + dcr, err := t.AccessTokens.DeviceCodeResult(ctx, authParams) + if err != nil { + return DeviceCode{}, err + } + + return DeviceCode{Result: dcr, authParams: authParams, accessTokens: t.AccessTokens}, nil +} + +func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error { + endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName) + if err != nil { + return fmt.Errorf("unable to resolve an endpoint: %s", err) + } + authParams.Endpoints = endpoints + return nil +} + +// scopeError takes an authority.AuthParams and returns an error +// if len(AuthParams.Scope) == 0. +func scopeError(a authority.AuthParams) error { + // TODO(someone): we could look deeper at the message to determine if + // it's a scope error, but this is a good start. + /* + {error":"invalid_scope","error_description":"AADSTS1002012: The provided value for scope + openid offline_access profile is not valid. 
Client credential flows must have a scope value + with /.default suffixed to the resource identifier (application ID URI)...} + */ + if len(a.Scopes) == 0 { + return fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which is invalid") + } + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go new file mode 100644 index 00000000..fa6bb61c --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go @@ -0,0 +1,451 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package accesstokens exposes a REST client for querying backend systems to get various types of +access tokens (oauth) for use in authentication. + +These calls are of type "application/x-www-form-urlencoded". This means we use url.Values to +represent arguments and then encode them into the POST body message. We receive JSON in +return for the requests. The request definition is defined in https://tools.ietf.org/html/rfc7521#section-4.2 . +*/ +package accesstokens + +import ( + "context" + "crypto" + + /* #nosec */ + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" + "github.com/golang-jwt/jwt/v4" + "github.com/google/uuid" +) + +const ( + grantType = "grant_type" + deviceCode = "device_code" + clientID = "client_id" + clientInfo = "client_info" + clientInfoVal = "1" + username = "username" + password = "password" +) + +//go:generate stringer -type=AppType + +// AppType is whether the authorization code flow is for a public or confidential client. +type AppType int8 + +const ( + // ATUnknown is the zero value when the type hasn't been set. + ATUnknown AppType = iota + // ATPublic indicates this if for the Public.Client. + ATPublic + // ATConfidential indicates this if for the Confidential.Client. + ATConfidential +) + +type urlFormCaller interface { + URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error +} + +// DeviceCodeResponse represents the HTTP response received from the device code endpoint +type DeviceCodeResponse struct { + authority.OAuthResponseBase + + UserCode string `json:"user_code"` + DeviceCode string `json:"device_code"` + VerificationURL string `json:"verification_url"` + ExpiresIn int `json:"expires_in"` + Interval int `json:"interval"` + Message string `json:"message"` + + AdditionalFields map[string]interface{} +} + +// Convert converts the DeviceCodeResponse to a DeviceCodeResult +func (dcr DeviceCodeResponse) Convert(clientID string, scopes []string) DeviceCodeResult { + expiresOn := time.Now().UTC().Add(time.Duration(dcr.ExpiresIn) * time.Second) + return NewDeviceCodeResult(dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, expiresOn, dcr.Interval, dcr.Message, clientID, scopes) +} + +// Credential represents the credential used in confidential client flows. 
This can be either +// a Secret or Cert/Key. +type Credential struct { + // Secret contains the credential secret if we are doing auth by secret. + Secret string + + // Cert is the public certificate, if we're authenticating by certificate. + Cert *x509.Certificate + // Key is the private key for signing, if we're authenticating by certificate. + Key crypto.PrivateKey + // X5c is the JWT assertion's x5c header value, required for SN/I authentication. + X5c []string + + // AssertionCallback is a function provided by the application, if we're authenticating by assertion. + AssertionCallback func(context.Context, exported.AssertionRequestOptions) (string, error) + + // TokenProvider is a function provided by the application that implements custom authentication + // logic for a confidential client + TokenProvider func(context.Context, exported.TokenProviderParameters) (exported.TokenProviderResult, error) +} + +// JWT gets the jwt assertion when the credential is not using a secret. +func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (string, error) { + if c.AssertionCallback != nil { + options := exported.AssertionRequestOptions{ + ClientID: authParams.ClientID, + TokenEndpoint: authParams.Endpoints.TokenEndpoint, + } + return c.AssertionCallback(ctx, options) + } + + token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "aud": authParams.Endpoints.TokenEndpoint, + "exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)), + "iss": authParams.ClientID, + "jti": uuid.New().String(), + "nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)), + "sub": authParams.ClientID, + }) + token.Header = map[string]interface{}{ + "alg": "RS256", + "typ": "JWT", + "x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)), + } + + if authParams.SendX5C { + token.Header["x5c"] = c.X5c + } + + assertion, err := token.SignedString(c.Key) + if err != nil { + return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err) + } + return assertion, nil +} + +// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT. +// https://tools.ietf.org/html/rfc7517#section-4.8 +func thumbprint(cert *x509.Certificate) []byte { + /* #nosec */ + a := sha1.Sum(cert.Raw) + return a[:] +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm urlFormCaller + + testing bool +} + +// FromUsernamePassword uses a username and password to get an access token. +func (c Client) FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.Password) + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +// AuthCodeRequest stores the values required to request a token from the authority using an authorization code +type AuthCodeRequest struct { + AuthParams authority.AuthParams + Code string + CodeChallenge string + Credential *Credential + AppType AppType +} + +// NewCodeChallengeRequest returns an AuthCodeRequest that uses a code challenge.. 
+// NewCodeChallengeRequest returns an AuthCodeRequest that uses a code challenge.
+func NewCodeChallengeRequest(params authority.AuthParams, appType AppType, cc *Credential, code, challenge string) (AuthCodeRequest, error) {
+	if appType == ATUnknown {
+		return AuthCodeRequest{}, fmt.Errorf("bug: NewCodeChallengeRequest() called with AppType == ATUnknown")
+	}
+	return AuthCodeRequest{
+		AuthParams:    params,
+		AppType:       appType,
+		Code:          code,
+		CodeChallenge: challenge,
+		Credential:    cc,
+	}, nil
+}
+
+// FromAuthCode uses an authorization code to retrieve an access token.
+func (c Client) FromAuthCode(ctx context.Context, req AuthCodeRequest) (TokenResponse, error) {
+	var qv url.Values
+
+	switch req.AppType {
+	case ATUnknown:
+		return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == ATUnknown")
+	case ATConfidential:
+		var err error
+		if req.Credential == nil {
+			return TokenResponse{}, fmt.Errorf("AuthCodeRequest had nil Credential for Confidential app")
+		}
+		qv, err = prepURLVals(ctx, req.Credential, req.AuthParams)
+		if err != nil {
+			return TokenResponse{}, err
+		}
+	case ATPublic:
+		qv = url.Values{}
+	default:
+		return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == %v, which we do not recognize", req.AppType)
+	}
+
+	qv.Set(grantType, grant.AuthCode)
+	qv.Set("code", req.Code)
+	qv.Set("code_verifier", req.CodeChallenge)
+	qv.Set("redirect_uri", req.AuthParams.Redirecturi)
+	qv.Set(clientID, req.AuthParams.ClientID)
+	qv.Set(clientInfo, clientInfoVal)
+	addScopeQueryParam(qv, req.AuthParams)
+	if err := addClaims(qv, req.AuthParams); err != nil {
+		return TokenResponse{}, err
+	}
+
+	return c.doTokenResp(ctx, req.AuthParams, qv)
+}
+
+// FromRefreshToken uses a refresh token (for refreshing credentials) to get a new access token.
+func (c Client) FromRefreshToken(ctx context.Context, appType AppType, authParams authority.AuthParams, cc *Credential, refreshToken string) (TokenResponse, error) {
+	qv := url.Values{}
+	if appType == ATConfidential {
+		var err error
+		qv, err = prepURLVals(ctx, cc, authParams)
+		if err != nil {
+			return TokenResponse{}, err
+		}
+	}
+	if err := addClaims(qv, authParams); err != nil {
+		return TokenResponse{}, err
+	}
+	qv.Set(grantType, grant.RefreshToken)
+	qv.Set(clientID, authParams.ClientID)
+	qv.Set(clientInfo, clientInfoVal)
+	qv.Set("refresh_token", refreshToken)
+	addScopeQueryParam(qv, authParams)
+
+	return c.doTokenResp(ctx, authParams, qv)
+}
+
+// FromClientSecret uses a client's secret (aka password) to get a new token.
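All of these calls share one transport convention: flat key/value pairs collected in url.Values and form-encoded into the POST body. A minimal sketch of roughly what FromAuthCode puts on the wire (every value here is a placeholder):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	qv := url.Values{}
	qv.Set("grant_type", "authorization_code")
	qv.Set("code", "placeholder-auth-code")
	qv.Set("code_verifier", "placeholder-pkce-verifier")
	qv.Set("redirect_uri", "http://localhost:8400")
	qv.Set("client_id", "00000000-0000-0000-0000-000000000000")
	qv.Set("scope", "user.read openid offline_access profile")

	// Encode() produces the application/x-www-form-urlencoded POST body.
	fmt.Println(qv.Encode())
}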
+func (c Client) FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_secret", clientSecret) + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromClientSecret(): %w", err) + } + return token, nil +} + +func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromAssertion(): %w", err) + } + return token, nil +} + +func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.JWT) + qv.Set(clientID, authParameters.ClientID) + qv.Set("client_secret", clientSecret) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.JWT) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (DeviceCodeResult, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return DeviceCodeResult{}, err + } + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + endpoint := strings.Replace(authParameters.Endpoints.TokenEndpoint, "token", "devicecode", -1) + + resp := DeviceCodeResponse{} + err := c.Comm.URLFormCall(ctx, endpoint, qv, &resp) + if err != nil { + return DeviceCodeResult{}, err + } + + return resp.Convert(authParameters.ClientID, authParameters.Scopes), nil +} + +func (c Client) FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult DeviceCodeResult) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.DeviceCode) + qv.Set(deviceCode, deviceCodeResult.DeviceCode) + qv.Set(clientID, 
authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + qv.Set("assertion", base64.StdEncoding.WithPadding(base64.StdPadding).EncodeToString([]byte(samlGrant.Assertion))) + addScopeQueryParam(qv, authParameters) + + switch samlGrant.AssertionType { + case grant.SAMLV1: + qv.Set(grantType, grant.SAMLV1) + case grant.SAMLV2: + qv.Set(grantType, grant.SAMLV2) + default: + return TokenResponse{}, fmt.Errorf("GetAccessTokenFromSamlGrant returned unknown SAML assertion type: %q", samlGrant.AssertionType) + } + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams, qv url.Values) (TokenResponse, error) { + resp := TokenResponse{} + err := c.Comm.URLFormCall(ctx, authParams.Endpoints.TokenEndpoint, qv, &resp) + if err != nil { + return resp, err + } + resp.ComputeScope(authParams) + if c.testing { + return resp, nil + } + return resp, resp.Validate() +} + +// prepURLVals returns an url.Values that sets various key/values if we are doing secrets +// or JWT assertions. +func prepURLVals(ctx context.Context, cc *Credential, authParams authority.AuthParams) (url.Values, error) { + params := url.Values{} + if cc.Secret != "" { + params.Set("client_secret", cc.Secret) + return params, nil + } + + jwt, err := cc.JWT(ctx, authParams) + if err != nil { + return nil, err + } + params.Set("client_assertion", jwt) + params.Set("client_assertion_type", grant.ClientAssertion) + return params, nil +} + +// openid required to get an id token +// offline_access required to get a refresh token +// profile required to get the client_info field back +var detectDefaultScopes = map[string]bool{ + "openid": true, + "offline_access": true, + "profile": true, +} + +var defaultScopes = []string{"openid", "offline_access", "profile"} + +func AppendDefaultScopes(authParameters authority.AuthParams) []string { + scopes := make([]string, 0, len(authParameters.Scopes)+len(defaultScopes)) + for _, scope := range authParameters.Scopes { + s := strings.TrimSpace(scope) + if s == "" { + continue + } + if detectDefaultScopes[scope] { + continue + } + scopes = append(scopes, scope) + } + scopes = append(scopes, defaultScopes...) 
+ return scopes +} + +// addClaims adds client capabilities and claims from AuthParams to the given url.Values +func addClaims(v url.Values, ap authority.AuthParams) error { + claims, err := ap.MergeCapabilitiesAndClaims() + if err == nil && claims != "" { + v.Set("claims", claims) + } + return err +} + +func addScopeQueryParam(queryParams url.Values, authParameters authority.AuthParams) { + scopes := AppendDefaultScopes(authParameters) + queryParams.Set("scope", strings.Join(scopes, " ")) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go new file mode 100644 index 00000000..3bec4a67 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=AppType"; DO NOT EDIT. + +package accesstokens + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATPublic-1] + _ = x[ATConfidential-2] +} + +const _AppType_name = "ATUnknownATPublicATConfidential" + +var _AppType_index = [...]uint8{0, 9, 17, 31} + +func (i AppType) String() string { + if i < 0 || i >= AppType(len(_AppType_index)-1) { + return "AppType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AppType_name[_AppType_index[i]:_AppType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go new file mode 100644 index 00000000..b3892bf3 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go @@ -0,0 +1,335 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package accesstokens + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "time" + + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// IDToken consists of all the information used to validate a user. +// https://docs.microsoft.com/azure/active-directory/develop/id-tokens . 
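IDToken.UnmarshalJSON below does no signature verification; it only splits the compact JWT on "." and base64url-decodes the payload segment, which per RFC 7515 is encoded without padding. The decoding step in isolation, on a hand-made token:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// header.payload.signature; the payload encodes {"name":"Ada","oid":"123"}
	// with base64url and no padding (a hand-made example, not a real AAD token).
	compact := "eyJhbGciOiJub25lIn0." +
		base64.RawURLEncoding.EncodeToString([]byte(`{"name":"Ada","oid":"123"}`)) +
		".sig"

	parts := strings.Split(compact, ".")
	if len(parts) < 2 {
		panic("invalid JWT")
	}
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload)) // {"name":"Ada","oid":"123"}
}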
+type IDToken struct {
+	PreferredUsername string `json:"preferred_username,omitempty"`
+	GivenName         string `json:"given_name,omitempty"`
+	FamilyName        string `json:"family_name,omitempty"`
+	MiddleName        string `json:"middle_name,omitempty"`
+	Name              string `json:"name,omitempty"`
+	Oid               string `json:"oid,omitempty"`
+	TenantID          string `json:"tid,omitempty"`
+	Subject           string `json:"sub,omitempty"`
+	UPN               string `json:"upn,omitempty"`
+	Email             string `json:"email,omitempty"`
+	AlternativeID     string `json:"alternative_id,omitempty"`
+	Issuer            string `json:"iss,omitempty"`
+	Audience          string `json:"aud,omitempty"`
+	ExpirationTime    int64  `json:"exp,omitempty"`
+	IssuedAt          int64  `json:"iat,omitempty"`
+	NotBefore         int64  `json:"nbf,omitempty"`
+	RawToken          string
+
+	AdditionalFields map[string]interface{}
+}
+
+var null = []byte("null")
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (i *IDToken) UnmarshalJSON(b []byte) error {
+	if bytes.Equal(null, b) {
+		return nil
+	}
+
+	// Because we have a custom unmarshaler, you
+	// cannot directly call json.Unmarshal here. If you do, it will call this function
+	// recursively until we reach our recursion limit. We have to create a new type
+	// that doesn't have this method in order to use json.Unmarshal.
+	type idToken2 IDToken
+
+	jwt := strings.Trim(string(b), `"`)
+	jwtArr := strings.Split(jwt, ".")
+	if len(jwtArr) < 2 {
+		return errors.New("IDToken returned from server is invalid")
+	}
+
+	jwtPart := jwtArr[1]
+	jwtDecoded, err := decodeJWT(jwtPart)
+	if err != nil {
+		return fmt.Errorf("unable to unmarshal IDToken, problem decoding JWT: %w", err)
+	}
+
+	token := idToken2{}
+	err = json.Unmarshal(jwtDecoded, &token)
+	if err != nil {
+		return fmt.Errorf("unable to unmarshal IDToken: %w", err)
+	}
+	token.RawToken = jwt
+
+	*i = IDToken(token)
+	return nil
+}
+
+// IsZero indicates if the IDToken is the zero value.
+func (i IDToken) IsZero() bool {
+	v := reflect.ValueOf(i)
+	for i := 0; i < v.NumField(); i++ {
+		field := v.Field(i)
+		if !field.IsZero() {
+			switch field.Kind() {
+			case reflect.Map, reflect.Slice:
+				if field.Len() == 0 {
+					continue
+				}
+			}
+			return false
+		}
+	}
+	return true
+}
+
+// LocalAccountID extracts an account's local account ID from an ID token.
+func (i IDToken) LocalAccountID() string {
+	if i.Oid != "" {
+		return i.Oid
+	}
+	return i.Subject
+}
+
+// jwtDecoder is provided to allow tests to provide their own.
+var jwtDecoder = decodeJWT
+
+// ClientInfo is used to create a Home Account ID for an account.
+type ClientInfo struct {
+	UID  string `json:"uid"`
+	UTID string `json:"utid"`
+
+	AdditionalFields map[string]interface{}
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (c *ClientInfo) UnmarshalJSON(b []byte) error {
+	s := strings.Trim(string(b), `"`)
+	// Client info may be empty in some flows, e.g. certificate exchange.
+	if len(s) == 0 {
+		return nil
+	}
+
+	// Because we have a custom unmarshaler, you
+	// cannot directly call json.Unmarshal here. If you do, it will call this function
+	// recursively until we reach our recursion limit. We have to create a new type
+	// that doesn't have this method in order to use json.Unmarshal.
+	type clientInfo2 ClientInfo
+
+	raw, err := jwtDecoder(s)
+	if err != nil {
+		return fmt.Errorf("TokenResponse client_info field had JWT decode error: %w", err)
+	}
+
+	var c2 clientInfo2
+
+	err = json.Unmarshal(raw, &c2)
+	if err != nil {
+		return fmt.Errorf("was unable to unmarshal decoded JWT in TokenResponse to ClientInfo: %w", err)
+	}
+
+	*c = ClientInfo(c2)
+	return nil
+}
+
+// HomeAccountID creates the home account ID.
+func (c ClientInfo) HomeAccountID() string {
+	if c.UID == "" {
+		return ""
+	} else if c.UTID == "" {
+		return fmt.Sprintf("%s.%s", c.UID, c.UID)
+	} else {
+		return fmt.Sprintf("%s.%s", c.UID, c.UTID)
+	}
+}
+
+// Scopes represents scopes in a TokenResponse.
+type Scopes struct {
+	Slice []string
+}
+
+// UnmarshalJSON implements json.Unmarshal.
+func (s *Scopes) UnmarshalJSON(b []byte) error {
+	str := strings.Trim(string(b), `"`)
+	if len(str) == 0 {
+		return nil
+	}
+	sl := strings.Split(str, " ")
+	s.Slice = sl
+	return nil
+}
+
+// TokenResponse is the information that is returned from a token endpoint during a token acquisition flow.
+type TokenResponse struct {
+	authority.OAuthResponseBase
+
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+
+	FamilyID       string                    `json:"foci"`
+	IDToken        IDToken                   `json:"id_token"`
+	ClientInfo     ClientInfo                `json:"client_info"`
+	ExpiresOn      internalTime.DurationTime `json:"expires_in"`
+	ExtExpiresOn   internalTime.DurationTime `json:"ext_expires_in"`
+	GrantedScopes  Scopes                    `json:"scope"`
+	DeclinedScopes []string                  // This is derived
+
+	AdditionalFields map[string]interface{}
+
+	scopesComputed bool
+}
+
+// ComputeScope computes the final scopes based on what was granted by the server and
+// what our AuthParams were from the authority server. Per OAuth spec, if no scopes are
+// returned, the response should be treated as if all requested scopes were granted.
+// This behavior can be observed in client assertion flows, but can happen at any time;
+// this check ensures we treat those special responses properly.
+// Link to spec: https://tools.ietf.org/html/rfc6749#section-3.3
+func (tr *TokenResponse) ComputeScope(authParams authority.AuthParams) {
+	if len(tr.GrantedScopes.Slice) == 0 {
+		tr.GrantedScopes = Scopes{Slice: authParams.Scopes}
+	} else {
+		tr.DeclinedScopes = findDeclinedScopes(authParams.Scopes, tr.GrantedScopes.Slice)
+	}
+	tr.scopesComputed = true
+}
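ComputeScope above encodes the RFC 6749 section 3.3 rule: an empty scope in the response means everything requested was granted; otherwise the declined set is the case-insensitive difference between requested and granted scopes. The comparison in isolation:

package main

import (
	"fmt"
	"strings"
)

func declinedScopes(requested, granted []string) []string {
	grantedSet := make(map[string]bool, len(granted))
	for _, s := range granted {
		grantedSet[strings.ToLower(s)] = true
	}
	var declined []string
	for _, r := range requested {
		if !grantedSet[strings.ToLower(r)] {
			declined = append(declined, r)
		}
	}
	return declined
}

func main() {
	requested := []string{"User.Read", "Mail.Send"}
	granted := []string{"user.read"} // the server granted only one; case differs
	fmt.Println(declinedScopes(requested, granted)) // [Mail.Send]
}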
+// Validate validates the TokenResponse has basic valid values. It must be called
+// after ComputeScope() is called.
+func (tr *TokenResponse) Validate() error {
+	if tr.Error != "" {
+		return fmt.Errorf("%s: %s", tr.Error, tr.ErrorDescription)
+	}
+
+	if tr.AccessToken == "" {
+		return errors.New("response is missing access_token")
+	}
+
+	if !tr.scopesComputed {
+		return fmt.Errorf("TokenResponse hasn't had ComputeScope() called")
+	}
+	return nil
+}
+
+func (tr *TokenResponse) CacheKey(authParams authority.AuthParams) string {
+	if authParams.AuthorizationType == authority.ATOnBehalfOf {
+		return authParams.AssertionHash()
+	}
+	if authParams.AuthorizationType == authority.ATClientCredentials {
+		return authParams.AppKey()
+	}
+	if authParams.IsConfidentialClient || authParams.AuthorizationType == authority.ATRefreshToken {
+		return tr.ClientInfo.HomeAccountID()
+	}
+	return ""
+}
+
+func findDeclinedScopes(requestedScopes []string, grantedScopes []string) []string {
+	declined := []string{}
+	grantedMap := map[string]bool{}
+	for _, s := range grantedScopes {
+		grantedMap[strings.ToLower(s)] = true
+	}
+	// Comparing the requested scopes with the granted scopes to see if there are any scopes that have been declined.
+	for _, r := range requestedScopes {
+		if !grantedMap[strings.ToLower(r)] {
+			declined = append(declined, r)
+		}
+	}
+	return declined
+}
+
+// decodeJWT decodes a JWT and converts it to a byte array representing a JSON object
+// JWT has headers and payload base64url encoded without padding
+// https://tools.ietf.org/html/rfc7519#section-3 and
+// https://tools.ietf.org/html/rfc7515#section-2
+func decodeJWT(data string) ([]byte, error) {
+	// https://tools.ietf.org/html/rfc7515#appendix-C
+	return base64.RawURLEncoding.DecodeString(data)
+}
+
+// RefreshToken is the JSON representation of a MSAL refresh token for encoding to storage.
+type RefreshToken struct {
+	HomeAccountID     string `json:"home_account_id,omitempty"`
+	Environment       string `json:"environment,omitempty"`
+	CredentialType    string `json:"credential_type,omitempty"`
+	ClientID          string `json:"client_id,omitempty"`
+	FamilyID          string `json:"family_id,omitempty"`
+	Secret            string `json:"secret,omitempty"`
+	Realm             string `json:"realm,omitempty"`
+	Target            string `json:"target,omitempty"`
+	UserAssertionHash string `json:"user_assertion_hash,omitempty"`
+
+	AdditionalFields map[string]interface{}
+}
+
+// NewRefreshToken is the constructor for RefreshToken.
+func NewRefreshToken(homeID, env, clientID, refreshToken, familyID string) RefreshToken {
+	return RefreshToken{
+		HomeAccountID:  homeID,
+		Environment:    env,
+		CredentialType: "RefreshToken",
+		ClientID:       clientID,
+		FamilyID:       familyID,
+		Secret:         refreshToken,
+	}
+}
+
+// Key outputs the key that can be used to uniquely look up this entry in a map.
+func (rt RefreshToken) Key() string {
+	var fourth = rt.FamilyID
+	if fourth == "" {
+		fourth = rt.ClientID
+	}
+
+	return strings.Join(
+		[]string{rt.HomeAccountID, rt.Environment, rt.CredentialType, fourth},
+		shared.CacheKeySeparator,
+	)
+}
+
+func (rt RefreshToken) GetSecret() string {
+	return rt.Secret
+}
+
+// DeviceCodeResult stores the response from the STS device code endpoint.
+type DeviceCodeResult struct {
+	// UserCode is the code the user needs to provide when authentication at the verification URI.
+	UserCode string
+	// DeviceCode is the code used in the access token request.
+	DeviceCode string
+	// VerificationURL is the URL where the user can authenticate.
+	VerificationURL string
+	// ExpiresOn is the time at which the device code expires.
+ ExpiresOn time.Time + // Interval is the interval at which the STS should be polled at. + Interval int + // Message is the message which should be displayed to the user. + Message string + // ClientID is the UUID issued by the authorization server for your application. + ClientID string + // Scopes is the OpenID scopes used to request access a protected API. + Scopes []string +} + +// NewDeviceCodeResult creates a DeviceCodeResult instance. +func NewDeviceCodeResult(userCode, deviceCode, verificationURL string, expiresOn time.Time, interval int, message, clientID string, scopes []string) DeviceCodeResult { + return DeviceCodeResult{userCode, deviceCode, verificationURL, expiresOn, interval, message, clientID, scopes} +} + +func (dcr DeviceCodeResult) String() string { + return fmt.Sprintf("UserCode: (%v)\nDeviceCode: (%v)\nURL: (%v)\nMessage: (%v)\n", dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, dcr.Message) + +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go new file mode 100644 index 00000000..7b2ccb4f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -0,0 +1,552 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package authority + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path" + "strings" + "time" + + "github.com/google/uuid" +) + +const ( + authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize" + instanceDiscoveryEndpoint = "https://%v/common/discovery/instance" + tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration" + regionName = "REGION_NAME" + defaultAPIVersion = "2021-10-01" + imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion + autoDetectRegion = "TryAutoDetect" +) + +// These are various hosts that host AAD Instance discovery endpoints. +const ( + defaultHost = "login.microsoftonline.com" + loginMicrosoft = "login.microsoft.com" + loginWindows = "login.windows.net" + loginSTSWindows = "sts.windows.net" + loginMicrosoftOnline = defaultHost +) + +// jsonCaller is an interface that allows us to mock the JSONCall method. +type jsonCaller interface { + JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error +} + +var aadTrustedHostList = map[string]bool{ + "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list + "login.chinacloudapi.cn": true, // Microsoft Azure China + "login.microsoftonline.de": true, // Microsoft Azure Blackforest + "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy + "login.microsoftonline.us": true, // Microsoft Azure US Government + "login.microsoftonline.com": true, // Microsoft Azure Worldwide + "login.cloudgovapi.us": true, // Microsoft Azure US Government +} + +// TrustedHost checks if an AAD host is trusted/valid. +func TrustedHost(host string) bool { + if _, ok := aadTrustedHostList[host]; ok { + return true + } + return false +} + +// OAuthResponseBase is the base JSON return message for an OAuth call. +// This is embedded in other calls to get the base fields from every response. 
+type OAuthResponseBase struct { + Error string `json:"error"` + SubError string `json:"suberror"` + ErrorDescription string `json:"error_description"` + ErrorCodes []int `json:"error_codes"` + CorrelationID string `json:"correlation_id"` + Claims string `json:"claims"` +} + +// TenantDiscoveryResponse is the tenant endpoints from the OpenID configuration endpoint. +type TenantDiscoveryResponse struct { + OAuthResponseBase + + AuthorizationEndpoint string `json:"authorization_endpoint"` + TokenEndpoint string `json:"token_endpoint"` + Issuer string `json:"issuer"` + + AdditionalFields map[string]interface{} +} + +// Validate validates that the response had the correct values required. +func (r *TenantDiscoveryResponse) Validate() error { + switch "" { + case r.AuthorizationEndpoint: + return errors.New("TenantDiscoveryResponse: authorize endpoint was not found in the openid configuration") + case r.TokenEndpoint: + return errors.New("TenantDiscoveryResponse: token endpoint was not found in the openid configuration") + case r.Issuer: + return errors.New("TenantDiscoveryResponse: issuer was not found in the openid configuration") + } + return nil +} + +type InstanceDiscoveryMetadata struct { + PreferredNetwork string `json:"preferred_network"` + PreferredCache string `json:"preferred_cache"` + Aliases []string `json:"aliases"` + + AdditionalFields map[string]interface{} +} + +type InstanceDiscoveryResponse struct { + TenantDiscoveryEndpoint string `json:"tenant_discovery_endpoint"` + Metadata []InstanceDiscoveryMetadata `json:"metadata"` + + AdditionalFields map[string]interface{} +} + +//go:generate stringer -type=AuthorizeType + +// AuthorizeType represents the type of token flow. +type AuthorizeType int + +// These are all the types of token flows. +const ( + ATUnknown AuthorizeType = iota + ATUsernamePassword + ATWindowsIntegrated + ATAuthCode + ATInteractive + ATClientCredentials + ATDeviceCode + ATRefreshToken + AccountByID + ATOnBehalfOf +) + +// These are all authority types +const ( + AAD = "MSSTS" + ADFS = "ADFS" +) + +// AuthParams represents the parameters used for authorization for token acquisition. +type AuthParams struct { + AuthorityInfo Info + CorrelationID string + Endpoints Endpoints + ClientID string + // Redirecturi is used for auth flows that specify a redirect URI (e.g. local server for interactive auth flow). + Redirecturi string + HomeAccountID string + // Username is the user-name portion for username/password auth flow. + Username string + // Password is the password portion for username/password auth flow. + Password string + // Scopes is the list of scopes the user consents to. + Scopes []string + // AuthorizationType specifies the auth flow being used. + AuthorizationType AuthorizeType + // State is a random value used to prevent cross-site request forgery attacks. + State string + // CodeChallenge is derived from a code verifier and is sent in the auth request. + CodeChallenge string + // CodeChallengeMethod describes the method used to create the CodeChallenge. + CodeChallengeMethod string + // Prompt specifies the user prompt type during interactive auth. + Prompt string + // IsConfidentialClient specifies if it is a confidential client. + IsConfidentialClient bool + // SendX5C specifies if x5c claim(public key of the certificate) should be sent to STS. + SendX5C bool + // UserAssertion is the access token used to acquire token on behalf of user + UserAssertion string + // Capabilities the client will include with each token request, for example "CP1". 
+	// Call [NewClientCapabilities] to construct a value for this field.
+	Capabilities ClientCapabilities
+	// Claims required for an access token to satisfy a conditional access policy
+	Claims string
+	// KnownAuthorityHosts don't require metadata discovery because they're known to the user
+	KnownAuthorityHosts []string
+	// LoginHint is a username with which to pre-populate account selection during interactive auth
+	LoginHint string
+	// DomainHint is a directive that can be used to accelerate the user to their federated IdP sign-in page
+	DomainHint string
+}
+
+// NewAuthParams creates an authorization parameters object.
+func NewAuthParams(clientID string, authorityInfo Info) AuthParams {
+	return AuthParams{
+		ClientID:      clientID,
+		AuthorityInfo: authorityInfo,
+		CorrelationID: uuid.New().String(),
+	}
+}
+
+// WithTenant returns a copy of the AuthParams having the specified tenant ID. If the given
+// ID is empty, the copy is identical to the original. This function returns an error in
+// several cases:
+//   - ID isn't specific (for example, it's "common")
+//   - ID is non-empty and the authority doesn't support tenants (for example, it's an ADFS authority)
+//   - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint
+//   - the resulting authority URL is invalid
+func (p AuthParams) WithTenant(ID string) (AuthParams, error) {
+	switch ID {
+	case "", p.AuthorityInfo.Tenant:
+		// keep the default tenant because the caller didn't override it
+		return p, nil
+	case "common", "consumers", "organizations":
+		if p.AuthorityInfo.AuthorityType == AAD {
+			return p, fmt.Errorf(`tenant ID must be a specific tenant, not "%s"`, ID)
+		}
+		// else we'll return a better error below
+	}
+	if p.AuthorityInfo.AuthorityType != AAD {
+		return p, errors.New("the authority doesn't support tenants")
+	}
+	if p.AuthorityInfo.Tenant == "consumers" {
+		return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`)
+	}
+	authority := "https://" + path.Join(p.AuthorityInfo.Host, ID)
+	info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled)
+	if err == nil {
+		info.Region = p.AuthorityInfo.Region
+		p.AuthorityInfo = info
+	}
+	return p, err
+}
+
+// MergeCapabilitiesAndClaims combines client capabilities and challenge claims into a value suitable for an authentication request's "claims" parameter.
+func (p AuthParams) MergeCapabilitiesAndClaims() (string, error) {
+	claims := p.Claims
+	if len(p.Capabilities.asMap) > 0 {
+		if claims == "" {
+			// without claims the result is simply the capabilities
+			return p.Capabilities.asJSON, nil
+		}
+		// Otherwise, merge claims and capabilities into a single JSON object.
+		// We handle the claims challenge as a map because we don't know its structure.
+		var challenge map[string]any
+		if err := json.Unmarshal([]byte(claims), &challenge); err != nil {
+			return "", fmt.Errorf(`claims must be JSON. Are they base64 encoded? json.Unmarshal returned "%v"`, err)
+		}
+		if err := merge(p.Capabilities.asMap, challenge); err != nil {
+			return "", err
+		}
+		b, err := json.Marshal(challenge)
+		if err != nil {
+			return "", err
+		}
+		claims = string(b)
+	}
+	return claims, nil
+}
+
+// merges a into b without overwriting b's values. Returns an error when a and b share a key for which either has a non-object value.
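merge below combines the static capabilities object with a per-request claims challenge without overwriting anything the challenge set. A runnable sketch of the same merge on realistically shaped inputs; unlike merge, this simplified version keeps the challenge's value on a non-map conflict instead of returning an error:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	capabilities := `{"access_token":{"xms_cc":{"values":["CP1"]}}}`
	challenge := `{"access_token":{"nbf":{"essential":true,"value":"1701000000"}}}`

	var a, b map[string]any
	if err := json.Unmarshal([]byte(capabilities), &a); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(challenge), &b); err != nil {
		panic(err)
	}

	// Merge a into b, never overwriting b's values; nested maps recurse.
	var merge func(a, b map[string]any)
	merge = func(a, b map[string]any) {
		for k, av := range a {
			if bv, ok := b[k]; ok {
				am, aok := av.(map[string]any)
				bm, bok := bv.(map[string]any)
				if aok && bok {
					merge(am, bm)
				}
				continue // non-map conflict: keep the challenge's value
			}
			b[k] = av
		}
	}
	merge(a, b)

	out, _ := json.Marshal(b)
	fmt.Println(string(out))
	// access_token now holds both nbf (from the challenge) and xms_cc (capabilities)
}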
+func merge(a, b map[string]any) error { + for k, av := range a { + if bv, ok := b[k]; !ok { + // b doesn't contain this key => simply set it to a's value + b[k] = av + } else { + // b does contain this key => recursively merge a[k] into b[k], provided both are maps. If a[k] or b[k] isn't + // a map, return an error because merging would overwrite some value in b. Errors shouldn't occur in practice + // because the challenge will be from AAD, which knows the capabilities format. + if A, ok := av.(map[string]any); ok { + if B, ok := bv.(map[string]any); ok { + return merge(A, B) + } else { + // b[k] isn't a map + return errors.New("challenge claims conflict with client capabilities") + } + } else { + // a[k] isn't a map + return errors.New("challenge claims conflict with client capabilities") + } + } + } + return nil +} + +// ClientCapabilities stores capabilities in the formats used by AuthParams.MergeCapabilitiesAndClaims. +// [NewClientCapabilities] precomputes these representations because capabilities are static for the +// lifetime of a client and are included with every authentication request i.e., these computations +// always have the same result and would otherwise have to be repeated for every request. +type ClientCapabilities struct { + // asJSON is for the common case: adding the capabilities to an auth request with no challenge claims + asJSON string + // asMap is for merging the capabilities with challenge claims + asMap map[string]any +} + +func NewClientCapabilities(capabilities []string) (ClientCapabilities, error) { + c := ClientCapabilities{} + var err error + if len(capabilities) > 0 { + cpbs := make([]string, len(capabilities)) + for i := 0; i < len(cpbs); i++ { + cpbs[i] = fmt.Sprintf(`"%s"`, capabilities[i]) + } + c.asJSON = fmt.Sprintf(`{"access_token":{"xms_cc":{"values":[%s]}}}`, strings.Join(cpbs, ",")) + // note our JSON is valid but we can't stop users breaking it with garbage like "}" + err = json.Unmarshal([]byte(c.asJSON), &c.asMap) + } + return c, err +} + +// Info consists of information about the authority. +type Info struct { + Host string + CanonicalAuthorityURI string + AuthorityType string + UserRealmURIPrefix string + ValidateAuthority bool + Tenant string + Region string + InstanceDiscoveryDisabled bool +} + +func firstPathSegment(u *url.URL) (string, error) { + pathParts := strings.Split(u.EscapedPath(), "/") + if len(pathParts) >= 2 { + return pathParts[1], nil + } + + return "", errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) +} + +// NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided. 
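NewInfoFromAuthorityURI below treats the first path segment of the authority URL as the tenant, with the special segment "adfs" switching the authority type. The parsing step on its own (the empty-segment check is an addition for this sketch):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	authority := "https://login.microsoftonline.com/contoso.onmicrosoft.com/"
	u, err := url.Parse(strings.ToLower(authority))
	if err != nil || u.Scheme != "https" {
		panic("authority must be an https URL")
	}
	segments := strings.Split(u.EscapedPath(), "/")
	if len(segments) < 2 || segments[1] == "" {
		panic("authority URL has no tenant segment")
	}
	tenant := segments[1] // "adfs" here would select the ADFS authority type
	fmt.Println(u.Host, tenant) // login.microsoftonline.com contoso.onmicrosoft.com
}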
+func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) { + u, err := url.Parse(strings.ToLower(authority)) + if err != nil || u.Scheme != "https" { + return Info{}, errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) + } + + tenant, err := firstPathSegment(u) + if err != nil { + return Info{}, err + } + authorityType := AAD + if tenant == "adfs" { + authorityType = ADFS + } + + // u.Host includes the port, if any, which is required for private cloud deployments + return Info{ + Host: u.Host, + CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Host, tenant), + AuthorityType: authorityType, + UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()), + ValidateAuthority: validateAuthority, + Tenant: tenant, + InstanceDiscoveryDisabled: instanceDiscoveryDisabled, + }, nil +} + +// Endpoints consists of the endpoints from the tenant discovery response. +type Endpoints struct { + AuthorizationEndpoint string + TokenEndpoint string + selfSignedJwtAudience string + authorityHost string +} + +// NewEndpoints creates an Endpoints object. +func NewEndpoints(authorizationEndpoint string, tokenEndpoint string, selfSignedJwtAudience string, authorityHost string) Endpoints { + return Endpoints{authorizationEndpoint, tokenEndpoint, selfSignedJwtAudience, authorityHost} +} + +// UserRealmAccountType refers to the type of user realm. +type UserRealmAccountType string + +// These are the different types of user realms. +const ( + Unknown UserRealmAccountType = "" + Federated UserRealmAccountType = "Federated" + Managed UserRealmAccountType = "Managed" +) + +// UserRealm is used for the username password request to determine user type +type UserRealm struct { + AccountType UserRealmAccountType `json:"account_type"` + DomainName string `json:"domain_name"` + CloudInstanceName string `json:"cloud_instance_name"` + CloudAudienceURN string `json:"cloud_audience_urn"` + + // required if accountType is Federated + FederationProtocol string `json:"federation_protocol"` + FederationMetadataURL string `json:"federation_metadata_url"` + + AdditionalFields map[string]interface{} +} + +func (u UserRealm) validate() error { + switch "" { + case string(u.AccountType): + return errors.New("the account type (Federated or Managed) is missing") + case u.DomainName: + return errors.New("domain name of user realm is missing") + case u.CloudInstanceName: + return errors.New("cloud instance name of user realm is missing") + case u.CloudAudienceURN: + return errors.New("cloud Instance URN is missing") + } + + if u.AccountType == Federated { + switch "" { + case u.FederationProtocol: + return errors.New("federation protocol of user realm is missing") + case u.FederationMetadataURL: + return errors.New("federation metadata URL of user realm is missing") + } + } + return nil +} + +// Client represents the REST calls to authority backends. +type Client struct { + // Comm provides the HTTP transport client. 
+ Comm jsonCaller // *comm.Client +} + +func (c Client) UserRealm(ctx context.Context, authParams AuthParams) (UserRealm, error) { + endpoint := fmt.Sprintf("https://%s/common/UserRealm/%s", authParams.Endpoints.authorityHost, url.PathEscape(authParams.Username)) + qv := url.Values{ + "api-version": []string{"1.0"}, + } + + resp := UserRealm{} + err := c.Comm.JSONCall( + ctx, + endpoint, + http.Header{"client-request-id": []string{authParams.CorrelationID}}, + qv, + nil, + &resp, + ) + if err != nil { + return resp, err + } + + return resp, resp.validate() +} + +func (c Client) GetTenantDiscoveryResponse(ctx context.Context, openIDConfigurationEndpoint string) (TenantDiscoveryResponse, error) { + resp := TenantDiscoveryResponse{} + err := c.Comm.JSONCall( + ctx, + openIDConfigurationEndpoint, + http.Header{}, + nil, + nil, + &resp, + ) + + return resp, err +} + +// AADInstanceDiscovery attempts to discover a tenant endpoint (used in OIDC auth with an authorization endpoint). +// This is done by AAD which allows for aliasing of tenants (windows.sts.net is the same as login.windows.com). +func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (InstanceDiscoveryResponse, error) { + region := "" + var err error + resp := InstanceDiscoveryResponse{} + if authorityInfo.Region != "" && authorityInfo.Region != autoDetectRegion { + region = authorityInfo.Region + } else if authorityInfo.Region == autoDetectRegion { + region = detectRegion(ctx) + } + if region != "" { + environment := authorityInfo.Host + switch environment { + case loginMicrosoft, loginWindows, loginSTSWindows, defaultHost: + environment = loginMicrosoft + } + + resp.TenantDiscoveryEndpoint = fmt.Sprintf(tenantDiscoveryEndpointWithRegion, region, environment, authorityInfo.Tenant) + metadata := InstanceDiscoveryMetadata{ + PreferredNetwork: fmt.Sprintf("%v.%v", region, authorityInfo.Host), + PreferredCache: authorityInfo.Host, + Aliases: []string{fmt.Sprintf("%v.%v", region, authorityInfo.Host), authorityInfo.Host}, + } + resp.Metadata = []InstanceDiscoveryMetadata{metadata} + } else { + qv := url.Values{} + qv.Set("api-version", "1.1") + qv.Set("authorization_endpoint", fmt.Sprintf(authorizationEndpoint, authorityInfo.Host, authorityInfo.Tenant)) + + discoveryHost := defaultHost + if TrustedHost(authorityInfo.Host) { + discoveryHost = authorityInfo.Host + } + + endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost) + err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp) + } + return resp, err +} + +func detectRegion(ctx context.Context) string { + region := os.Getenv(regionName) + if region != "" { + region = strings.ReplaceAll(region, " ", "") + return strings.ToLower(region) + } + // HTTP call to IMDS endpoint to get region + // Refer : https://identitydivision.visualstudio.com/DevEx/_git/AuthLibrariesApiReview?path=%2FPinAuthToRegion%2FAAD%20SDK%20Proposal%20to%20Pin%20Auth%20to%20region.md&_a=preview&version=GBdev + // Set a 2 second timeout for this http client which only does calls to IMDS endpoint + client := http.Client{ + Timeout: time.Duration(2 * time.Second), + } + req, _ := http.NewRequest("GET", imdsEndpoint, nil) + req.Header.Set("Metadata", "true") + resp, err := client.Do(req) + // If the request times out or there is an error, it is retried once + if err != nil || resp.StatusCode != 200 { + resp, err = client.Do(req) + if err != nil || resp.StatusCode != 200 { + return "" + } + } + defer resp.Body.Close() + response, err := io.ReadAll(resp.Body) + if err != nil { + 
return "" + } + return string(response) +} + +func (a *AuthParams) CacheKey(isAppCache bool) string { + if a.AuthorizationType == ATOnBehalfOf { + return a.AssertionHash() + } + if a.AuthorizationType == ATClientCredentials || isAppCache { + return a.AppKey() + } + if a.AuthorizationType == ATRefreshToken || a.AuthorizationType == AccountByID { + return a.HomeAccountID + } + return "" +} +func (a *AuthParams) AssertionHash() string { + hasher := sha256.New() + // Per documentation this never returns an error : https://pkg.go.dev/hash#pkg-types + _, _ = hasher.Write([]byte(a.UserAssertion)) + sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + return sha +} + +func (a *AuthParams) AppKey() string { + if a.AuthorityInfo.Tenant != "" { + return fmt.Sprintf("%s_%s_AppTokenCache", a.ClientID, a.AuthorityInfo.Tenant) + } + return fmt.Sprintf("%s__AppTokenCache", a.ClientID) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go new file mode 100644 index 00000000..10039773 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=AuthorizeType"; DO NOT EDIT. + +package authority + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATUsernamePassword-1] + _ = x[ATWindowsIntegrated-2] + _ = x[ATAuthCode-3] + _ = x[ATInteractive-4] + _ = x[ATClientCredentials-5] + _ = x[ATDeviceCode-6] + _ = x[ATRefreshToken-7] +} + +const _AuthorizeType_name = "ATUnknownATUsernamePasswordATWindowsIntegratedATAuthCodeATInteractiveATClientCredentialsATDeviceCodeATRefreshToken" + +var _AuthorizeType_index = [...]uint8{0, 9, 27, 46, 56, 69, 88, 100, 114} + +func (i AuthorizeType) String() string { + if i < 0 || i >= AuthorizeType(len(_AuthorizeType_index)-1) { + return "AuthorizeType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AuthorizeType_name[_AuthorizeType_index[i]:_AuthorizeType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go new file mode 100644 index 00000000..7d9ec7cd --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go @@ -0,0 +1,320 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package comm provides helpers for communicating with HTTP backends. +package comm + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "runtime" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version" + "github.com/google/uuid" +) + +// HTTPClient represents an HTTP client. +// It's usually an *http.Client from the standard library. 
+type HTTPClient interface {
+	// Do sends an HTTP request and returns an HTTP response.
+	Do(req *http.Request) (*http.Response, error)
+
+	// CloseIdleConnections closes any idle connections in a "keep-alive" state.
+	CloseIdleConnections()
+}
+
+// Client provides a wrapper to our *http.Client that handles compression and serialization needs.
+type Client struct {
+	client HTTPClient
+}
+
+// New returns a new Client object.
+func New(httpClient HTTPClient) *Client {
+	if httpClient == nil {
+		panic("http.Client cannot be nil")
+	}
+
+	return &Client{client: httpClient}
+}
+
+// JSONCall connects to the REST endpoint passing the HTTP query values, headers and JSON conversion
+// of body in the HTTP body. It automatically handles compression and decompression with gzip. The response is JSON
+// unmarshalled into resp. resp must be a pointer to a struct. If the body struct contains a field called
+// "AdditionalFields" we use a custom marshal/unmarshal engine.
+func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error {
+	if qv == nil {
+		qv = url.Values{}
+	}
+
+	v := reflect.ValueOf(resp)
+	if err := c.checkResp(v); err != nil {
+		return err
+	}
+
+	// Choose a JSON marshal/unmarshal depending on if we have AdditionalFields attribute.
+	var marshal = json.Marshal
+	var unmarshal = json.Unmarshal
+	if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok {
+		marshal = customJSON.Marshal
+		unmarshal = customJSON.Unmarshal
+	}
+
+	u, err := url.Parse(endpoint)
+	if err != nil {
+		return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
+	}
+	u.RawQuery = qv.Encode()
+
+	addStdHeaders(headers)
+
+	req := &http.Request{Method: http.MethodGet, URL: u, Header: headers}
+
+	if body != nil {
+		// Note: In case you're wondering why we are not gzip encoding....
+		// I'm not sure if these various services support gzip on send.
+		headers.Add("Content-Type", "application/json; charset=utf-8")
+		data, err := marshal(body)
+		if err != nil {
+			return fmt.Errorf("bug: conn.Call(): could not marshal the body object: %w", err)
+		}
+		req.Body = io.NopCloser(bytes.NewBuffer(data))
+		req.Method = http.MethodPost
+	}
+
+	data, err := c.do(ctx, req)
+	if err != nil {
+		return err
+	}
+
+	if resp != nil {
+		if err := unmarshal(data, resp); err != nil {
+			return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))
+		}
+	}
+	return nil
+}
+
+// XMLCall connects to an endpoint and decodes the XML response into resp. This is used when
+// sending application/xml. If sending XML via SOAP, use SOAPCall().
+func (c *Client) XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error {
+	if err := c.checkResp(reflect.ValueOf(resp)); err != nil {
+		return err
+	}
+
+	if qv == nil {
+		qv = url.Values{}
+	}
+
+	u, err := url.Parse(endpoint)
+	if err != nil {
+		return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
+	}
+	u.RawQuery = qv.Encode()
+
+	headers.Set("Content-Type", "application/xml; charset=utf-8") // This was not set in the original Mex(), but...
+	addStdHeaders(headers)
+
+	return c.xmlCall(ctx, u, headers, "", resp)
+}
+
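A minimal usage sketch (not part of the vendored diff), with a hypothetical response struct; any struct carrying an AdditionalFields field is routed through the module's custom JSON engine:

	type realmResp struct {
		DomainName       string `json:"domain_name"`
		AdditionalFields map[string]interface{}
	}
	var out realmResp
	qv := url.Values{"api-version": []string{"1.0"}}
	err := client.JSONCall(ctx, "https://login.microsoftonline.com/common/userrealm/user@contoso.com", http.Header{}, qv, nil, &out)

+// SOAPCall returns the SOAP message given an endpoint, action, body of the request and the response object to marshal into.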
+func (c *Client) SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error { + if body == "" { + return fmt.Errorf("cannot make a SOAP call with body set to empty string") + } + + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + if qv == nil { + qv = url.Values{} + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + headers.Set("Content-Type", "application/soap+xml; charset=utf-8") + headers.Set("SOAPAction", action) + addStdHeaders(headers) + + return c.xmlCall(ctx, u, headers, body, resp) +} + +// xmlCall sends an XML in body and decodes into resp. This simply does the transport and relies on +// an upper level call to set things such as SOAP parameters and Content-Type, if required. +func (c *Client) xmlCall(ctx context.Context, u *url.URL, headers http.Header, body string, resp interface{}) error { + req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} + + if len(body) > 0 { + req.Method = http.MethodPost + req.Body = io.NopCloser(strings.NewReader(body)) + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + return xml.Unmarshal(data, resp) +} + +// URLFormCall is used to make a call where we need to send application/x-www-form-urlencoded data +// to the backend and receive JSON back. qv will be encoded into the request body. +func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error { + if len(qv) == 0 { + return fmt.Errorf("URLFormCall() requires qv to have non-zero length") + } + + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + + headers := http.Header{} + headers.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + addStdHeaders(headers) + + enc := qv.Encode() + + req := &http.Request{ + Method: http.MethodPost, + URL: u, + Header: headers, + ContentLength: int64(len(enc)), + Body: io.NopCloser(strings.NewReader(enc)), + GetBody: func() (io.ReadCloser, error) { + return io.NopCloser(strings.NewReader(enc)), nil + }, + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + v := reflect.ValueOf(resp) + if err := c.checkResp(v); err != nil { + return err + } + + var unmarshal = json.Unmarshal + if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok { + unmarshal = customJSON.Unmarshal + } + if resp != nil { + if err := unmarshal(data, resp); err != nil { + return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data)) + } + } + return nil +} + +// do makes the HTTP call to the server and returns the contents of the body. 
+func (c *Client) do(ctx context.Context, req *http.Request) ([]byte, error) {
+	if _, ok := ctx.Deadline(); !ok {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, 30*time.Second)
+		defer cancel()
+	}
+	req = req.WithContext(ctx)
+
+	reply, err := c.client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("server response error:\n %w", err)
+	}
+	defer reply.Body.Close()
+
+	data, err := c.readBody(reply)
+	if err != nil {
+		return nil, fmt.Errorf("could not read the body of an HTTP Response: %w", err)
+	}
+	reply.Body = io.NopCloser(bytes.NewBuffer(data))
+
+	// NOTE: This doesn't happen immediately after the call so that we can get an error message
+	// from the server and include it in our error.
+	switch reply.StatusCode {
+	case 200, 201:
+	default:
+		sd := strings.TrimSpace(string(data))
+		if sd != "" {
+			// We probably have the error in the body.
+			return nil, errors.CallErr{
+				Req:  req,
+				Resp: reply,
+				Err:  fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, sd),
+			}
+		}
+		return nil, errors.CallErr{
+			Req:  req,
+			Resp: reply,
+			Err:  fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, reply.StatusCode),
+		}
+	}
+
+	return data, nil
+}
+
+// checkResp checks a response object to make sure it is a pointer to a struct.
+func (c *Client) checkResp(v reflect.Value) error {
+	if v.Kind() != reflect.Ptr {
+		return fmt.Errorf("bug: resp argument must be a *struct, was %T", v.Interface())
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return fmt.Errorf("bug: resp argument must be a *struct, was %T", v.Interface())
+	}
+	return nil
+}
+
+// readBody reads the body out of an *http.Response. It supports gzip encoded responses.
+func (c *Client) readBody(resp *http.Response) ([]byte, error) {
+	var reader io.Reader = resp.Body
+	switch resp.Header.Get("Content-Encoding") {
+	case "":
+		// Do nothing
+	case "gzip":
+		reader = gzipDecompress(resp.Body)
+	default:
+		return nil, fmt.Errorf("bug: comm.Client.JSONCall(): content was sent with unsupported content-encoding %s", resp.Header.Get("Content-Encoding"))
+	}
+	return io.ReadAll(reader)
+}
+
+var testID string
+
+// addStdHeaders adds the standard headers we use on all calls.
+func addStdHeaders(headers http.Header) http.Header {
+	headers.Set("Accept-Encoding", "gzip")
+	// So that I can have a static id for tests.
+	if testID != "" {
+		headers.Set("client-request-id", testID)
+		headers.Set("Return-Client-Request-Id", "false")
+	} else {
+		headers.Set("client-request-id", uuid.New().String())
+		headers.Set("Return-Client-Request-Id", "false")
+	}
+	headers.Set("x-client-sku", "MSAL.Go")
+	headers.Set("x-client-os", runtime.GOOS)
+	headers.Set("x-client-cpu", runtime.GOARCH)
+	headers.Set("x-client-ver", version.Version)
+	return headers
+}
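An illustrative aside (not part of the vendored diff): do() imposes a 30-second default deadline when the caller's context has none, so a bare context.Background() is safe, but a caller can always supply a tighter one:

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// err := client.JSONCall(ctx, endpoint, http.Header{}, nil, nil, &resp)

diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go
new file mode 100644
index 00000000..4d3dbfcf
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go
@@ -0,0 +1,33 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.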
+ +package comm + +import ( + "compress/gzip" + "io" +) + +func gzipDecompress(r io.Reader) io.Reader { + gzipReader, _ := gzip.NewReader(r) + + pipeOut, pipeIn := io.Pipe() + go func() { + // decompression bomb would have to come from Azure services. + // If we want to limit, we should do that in comm.do(). + _, err := io.Copy(pipeIn, gzipReader) //nolint + if err != nil { + // don't need the error. + pipeIn.CloseWithError(err) //nolint + gzipReader.Close() + return + } + if err := gzipReader.Close(); err != nil { + // don't need the error. + pipeIn.CloseWithError(err) //nolint + return + } + pipeIn.Close() + }() + return pipeOut +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go new file mode 100644 index 00000000..b628f61a --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package grant holds types of grants issued by authorization services. +package grant + +const ( + Password = "password" + JWT = "urn:ietf:params:oauth:grant-type:jwt-bearer" + SAMLV1 = "urn:ietf:params:oauth:grant-type:saml1_1-bearer" + SAMLV2 = "urn:ietf:params:oauth:grant-type:saml2-bearer" + DeviceCode = "device_code" + AuthCode = "authorization_code" + RefreshToken = "refresh_token" + ClientCredential = "client_credentials" + ClientAssertion = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" +) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go new file mode 100644 index 00000000..1f9c543f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go @@ -0,0 +1,56 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package ops provides operations to various backend services using REST clients. + +The REST type provides several clients that can be used to communicate to backends. +Usage is simple: + + rest := ops.New() + + // Creates an authority client and calls the UserRealm() method. + userRealm, err := rest.Authority().UserRealm(ctx, authParameters) + if err != nil { + // Do something + } +*/ +package ops + +import ( + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" +) + +// HTTPClient represents an HTTP client. +// It's usually an *http.Client from the standard library. +type HTTPClient = comm.HTTPClient + +// REST provides REST clients for communicating with various backends used by MSAL. +type REST struct { + client *comm.Client +} + +// New is the constructor for REST. +func New(httpClient HTTPClient) *REST { + return &REST{client: comm.New(httpClient)} +} + +// Authority returns a client for querying information about various authorities. 
+func (r *REST) Authority() authority.Client { + return authority.Client{Comm: r.client} +} + +// AccessTokens returns a client that can be used to get various access tokens for +// authorization purposes. +func (r *REST) AccessTokens() accesstokens.Client { + return accesstokens.Client{Comm: r.client} +} + +// WSTrust provides access to various metadata in a WSTrust service. This data can +// be used to gain tokens based on SAML data using the client provided by AccessTokens(). +func (r *REST) WSTrust() wstrust.Client { + return wstrust.Client{Comm: r.client} +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go new file mode 100644 index 00000000..a2bb6278 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=endpointType"; DO NOT EDIT. + +package defs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[etUnknown-0] + _ = x[etUsernamePassword-1] + _ = x[etWindowsTransport-2] +} + +const _endpointType_name = "etUnknownetUsernamePasswordetWindowsTransport" + +var _endpointType_index = [...]uint8{0, 9, 27, 45} + +func (i endpointType) String() string { + if i < 0 || i >= endpointType(len(_endpointType_index)-1) { + return "endpointType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _endpointType_name[_endpointType_index[i]:_endpointType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go new file mode 100644 index 00000000..64972700 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go @@ -0,0 +1,394 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package defs + +import "encoding/xml" + +type Definitions struct { + XMLName xml.Name `xml:"definitions"` + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + TargetNamespace string `xml:"targetNamespace,attr"` + WSDL string `xml:"wsdl,attr"` + XSD string `xml:"xsd,attr"` + T string `xml:"t,attr"` + SOAPENC string `xml:"soapenc,attr"` + SOAP string `xml:"soap,attr"` + TNS string `xml:"tns,attr"` + MSC string `xml:"msc,attr"` + WSAM string `xml:"wsam,attr"` + SOAP12 string `xml:"soap12,attr"` + WSA10 string `xml:"wsa10,attr"` + WSA string `xml:"wsa,attr"` + WSAW string `xml:"wsaw,attr"` + WSX string `xml:"wsx,attr"` + WSAP string `xml:"wsap,attr"` + WSU string `xml:"wsu,attr"` + Trust string `xml:"trust,attr"` + WSP string `xml:"wsp,attr"` + Policy []Policy `xml:"Policy"` + Types Types `xml:"types"` + Message []Message `xml:"message"` + PortType []PortType `xml:"portType"` + Binding []Binding `xml:"binding"` + Service Service `xml:"service"` +} + +type Policy struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + ExactlyOne ExactlyOne `xml:"ExactlyOne"` +} + +type ExactlyOne struct { + Text string `xml:",chardata"` + All All `xml:"All"` +} + +type All struct { + Text string `xml:",chardata"` + NegotiateAuthentication NegotiateAuthentication `xml:"NegotiateAuthentication"` + TransportBinding TransportBinding `xml:"TransportBinding"` + UsingAddressing Text `xml:"UsingAddressing"` + EndorsingSupportingTokens EndorsingSupportingTokens `xml:"EndorsingSupportingTokens"` + WSS11 WSS11 `xml:"Wss11"` + Trust10 Trust10 `xml:"Trust10"` + SignedSupportingTokens SignedSupportingTokens `xml:"SignedSupportingTokens"` + Trust13 WSTrust13 `xml:"Trust13"` + SignedEncryptedSupportingTokens SignedEncryptedSupportingTokens `xml:"SignedEncryptedSupportingTokens"` +} + +type NegotiateAuthentication struct { + Text string `xml:",chardata"` + HTTP string `xml:"http,attr"` + XMLName xml.Name +} + +type TransportBinding struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy TransportBindingPolicy `xml:"Policy"` +} + +type TransportBindingPolicy struct { + Text string `xml:",chardata"` + TransportToken TransportToken `xml:"TransportToken"` + AlgorithmSuite AlgorithmSuite `xml:"AlgorithmSuite"` + Layout Layout `xml:"Layout"` + IncludeTimestamp Text `xml:"IncludeTimestamp"` +} + +type TransportToken struct { + Text string `xml:",chardata"` + Policy TransportTokenPolicy `xml:"Policy"` +} + +type TransportTokenPolicy struct { + Text string `xml:",chardata"` + HTTPSToken HTTPSToken `xml:"HttpsToken"` +} + +type HTTPSToken struct { + Text string `xml:",chardata"` + RequireClientCertificate string `xml:"RequireClientCertificate,attr"` +} + +type AlgorithmSuite struct { + Text string `xml:",chardata"` + Policy AlgorithmSuitePolicy `xml:"Policy"` +} + +type AlgorithmSuitePolicy struct { + Text string `xml:",chardata"` + Basic256 Text `xml:"Basic256"` + Basic128 Text `xml:"Basic128"` +} + +type Layout struct { + Text string `xml:",chardata"` + Policy LayoutPolicy `xml:"Policy"` +} + +type LayoutPolicy struct { + Text string `xml:",chardata"` + Strict Text `xml:"Strict"` +} + +type EndorsingSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy EndorsingSupportingTokensPolicy `xml:"Policy"` +} + +type EndorsingSupportingTokensPolicy struct { + Text string `xml:",chardata"` + X509Token X509Token `xml:"X509Token"` + RSAToken RSAToken `xml:"RsaToken"` + SignedParts SignedParts `xml:"SignedParts"` + KerberosToken KerberosToken 
`xml:"KerberosToken"` + IssuedToken IssuedToken `xml:"IssuedToken"` + KeyValueToken KeyValueToken `xml:"KeyValueToken"` +} + +type X509Token struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy X509TokenPolicy `xml:"Policy"` +} + +type X509TokenPolicy struct { + Text string `xml:",chardata"` + RequireThumbprintReference Text `xml:"RequireThumbprintReference"` + WSSX509V3Token10 Text `xml:"WssX509V3Token10"` +} + +type RSAToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` + MSSP string `xml:"mssp,attr"` +} + +type SignedParts struct { + Text string `xml:",chardata"` + Header SignedPartsHeader `xml:"Header"` +} + +type SignedPartsHeader struct { + Text string `xml:",chardata"` + Name string `xml:"Name,attr"` + Namespace string `xml:"Namespace,attr"` +} + +type KerberosToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy KerberosTokenPolicy `xml:"Policy"` +} + +type KerberosTokenPolicy struct { + Text string `xml:",chardata"` + WSSGSSKerberosV5ApReqToken11 Text `xml:"WssGssKerberosV5ApReqToken11"` +} + +type IssuedToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + RequestSecurityTokenTemplate RequestSecurityTokenTemplate `xml:"RequestSecurityTokenTemplate"` + Policy IssuedTokenPolicy `xml:"Policy"` +} + +type RequestSecurityTokenTemplate struct { + Text string `xml:",chardata"` + KeyType Text `xml:"KeyType"` + EncryptWith Text `xml:"EncryptWith"` + SignatureAlgorithm Text `xml:"SignatureAlgorithm"` + CanonicalizationAlgorithm Text `xml:"CanonicalizationAlgorithm"` + EncryptionAlgorithm Text `xml:"EncryptionAlgorithm"` + KeySize Text `xml:"KeySize"` + KeyWrapAlgorithm Text `xml:"KeyWrapAlgorithm"` +} + +type IssuedTokenPolicy struct { + Text string `xml:",chardata"` + RequireInternalReference Text `xml:"RequireInternalReference"` +} + +type KeyValueToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` +} + +type WSS11 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy Wss11Policy `xml:"Policy"` +} + +type Wss11Policy struct { + Text string `xml:",chardata"` + MustSupportRefThumbprint Text `xml:"MustSupportRefThumbprint"` +} + +type Trust10 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy Trust10Policy `xml:"Policy"` +} + +type Trust10Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type SupportingTokensPolicy struct { + Text string `xml:",chardata"` + UsernameToken UsernameToken `xml:"UsernameToken"` +} +type UsernameToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy UsernameTokenPolicy `xml:"Policy"` +} + +type UsernameTokenPolicy struct { + Text string `xml:",chardata"` + WSSUsernameToken10 WSSUsernameToken10 `xml:"WssUsernameToken10"` +} + +type WSSUsernameToken10 struct { + Text string `xml:",chardata"` + XMLName xml.Name +} + +type WSTrust13 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy WSTrust13Policy `xml:"Policy"` +} + 
+type WSTrust13Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedEncryptedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type Types struct { + Text string `xml:",chardata"` + Schema Schema `xml:"schema"` +} + +type Schema struct { + Text string `xml:",chardata"` + TargetNamespace string `xml:"targetNamespace,attr"` + Import []Import `xml:"import"` +} + +type Import struct { + Text string `xml:",chardata"` + SchemaLocation string `xml:"schemaLocation,attr"` + Namespace string `xml:"namespace,attr"` +} + +type Message struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Part Part `xml:"part"` +} + +type Part struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Element string `xml:"element,attr"` +} + +type PortType struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation Operation `xml:"operation"` +} + +type Operation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Input OperationIO `xml:"input"` + Output OperationIO `xml:"output"` +} + +type OperationIO struct { + Text string `xml:",chardata"` + Action string `xml:"Action,attr"` + Message string `xml:"message,attr"` + Body OperationIOBody `xml:"body"` +} + +type OperationIOBody struct { + Text string `xml:",chardata"` + Use string `xml:"use,attr"` +} + +type Binding struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + PolicyReference PolicyReference `xml:"PolicyReference"` + Binding DefinitionsBinding `xml:"binding"` + Operation BindingOperation `xml:"operation"` +} + +type PolicyReference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` +} + +type DefinitionsBinding struct { + Text string `xml:",chardata"` + Transport string `xml:"transport,attr"` +} + +type BindingOperation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation BindingOperationOperation `xml:"operation"` + Input BindingOperationIO `xml:"input"` + Output BindingOperationIO `xml:"output"` +} + +type BindingOperationOperation struct { + Text string `xml:",chardata"` + SoapAction string `xml:"soapAction,attr"` + Style string `xml:"style,attr"` +} + +type BindingOperationIO struct { + Text string `xml:",chardata"` + Body OperationIOBody `xml:"body"` +} + +type Service struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Port []Port `xml:"port"` +} + +type Port struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Binding string `xml:"binding,attr"` + Address Address `xml:"address"` + EndpointReference PortEndpointReference `xml:"EndpointReference"` +} + +type Address struct { + Text string `xml:",chardata"` + Location string `xml:"location,attr"` +} + +type PortEndpointReference struct { + Text string `xml:",chardata"` + Address Text `xml:"Address"` + Identity Identity `xml:"Identity"` +} + +type Identity struct { + Text string `xml:",chardata"` + XMLNS string `xml:"xmlns,attr"` + SPN Text `xml:"Spn"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go new file mode 100644 index 00000000..7d072556 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go @@ -0,0 +1,230 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import "encoding/xml" + +// TODO(msal): Someone (and it ain't gonna be me) needs to document these attributes or +// at the least put a link to RFC. + +type SAMLDefinitions struct { + XMLName xml.Name `xml:"Envelope"` + Text string `xml:",chardata"` + S string `xml:"s,attr"` + A string `xml:"a,attr"` + U string `xml:"u,attr"` + Header Header `xml:"Header"` + Body Body `xml:"Body"` +} + +type Header struct { + Text string `xml:",chardata"` + Action Action `xml:"Action"` + Security Security `xml:"Security"` +} + +type Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` +} + +type Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` + O string `xml:"o,attr"` + Timestamp Timestamp `xml:"Timestamp"` +} + +type Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + Created Text `xml:"Created"` + Expires Text `xml:"Expires"` +} + +type Text struct { + Text string `xml:",chardata"` +} + +type Body struct { + Text string `xml:",chardata"` + RequestSecurityTokenResponseCollection RequestSecurityTokenResponseCollection `xml:"RequestSecurityTokenResponseCollection"` +} + +type RequestSecurityTokenResponseCollection struct { + Text string `xml:",chardata"` + Trust string `xml:"trust,attr"` + RequestSecurityTokenResponse []RequestSecurityTokenResponse `xml:"RequestSecurityTokenResponse"` +} + +type RequestSecurityTokenResponse struct { + Text string `xml:",chardata"` + Lifetime Lifetime `xml:"Lifetime"` + AppliesTo AppliesTo `xml:"AppliesTo"` + RequestedSecurityToken RequestedSecurityToken `xml:"RequestedSecurityToken"` + RequestedAttachedReference RequestedAttachedReference `xml:"RequestedAttachedReference"` + RequestedUnattachedReference RequestedUnattachedReference `xml:"RequestedUnattachedReference"` + TokenType Text `xml:"TokenType"` + RequestType Text `xml:"RequestType"` + KeyType Text `xml:"KeyType"` +} + +type Lifetime struct { + Text string `xml:",chardata"` + Created WSUTimestamp `xml:"Created"` + Expires WSUTimestamp `xml:"Expires"` +} + +type WSUTimestamp struct { + Text string `xml:",chardata"` + Wsu string `xml:"wsu,attr"` +} + +type AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"wsp,attr"` + EndpointReference EndpointReference `xml:"EndpointReference"` +} + +type EndpointReference struct { + Text string `xml:",chardata"` + Wsa string `xml:"wsa,attr"` + Address Text `xml:"Address"` +} + +type RequestedSecurityToken struct { + Text string `xml:",chardata"` + AssertionRawXML string `xml:",innerxml"` + Assertion Assertion `xml:"Assertion"` +} + +type Assertion struct { + XMLName xml.Name // Normally its `xml:"Assertion"`, but I think they want to capture the xmlns + Text string `xml:",chardata"` + MajorVersion string `xml:"MajorVersion,attr"` + MinorVersion string `xml:"MinorVersion,attr"` + AssertionID string `xml:"AssertionID,attr"` + Issuer string `xml:"Issuer,attr"` + IssueInstant string `xml:"IssueInstant,attr"` + Saml string `xml:"saml,attr"` + Conditions Conditions `xml:"Conditions"` + 
AttributeStatement AttributeStatement `xml:"AttributeStatement"` + AuthenticationStatement AuthenticationStatement `xml:"AuthenticationStatement"` + Signature Signature `xml:"Signature"` +} + +type Conditions struct { + Text string `xml:",chardata"` + NotBefore string `xml:"NotBefore,attr"` + NotOnOrAfter string `xml:"NotOnOrAfter,attr"` + AudienceRestrictionCondition AudienceRestrictionCondition `xml:"AudienceRestrictionCondition"` +} + +type AudienceRestrictionCondition struct { + Text string `xml:",chardata"` + Audience Text `xml:"Audience"` +} + +type AttributeStatement struct { + Text string `xml:",chardata"` + Subject Subject `xml:"Subject"` + Attribute []Attribute `xml:"Attribute"` +} + +type Subject struct { + Text string `xml:",chardata"` + NameIdentifier NameIdentifier `xml:"NameIdentifier"` + SubjectConfirmation SubjectConfirmation `xml:"SubjectConfirmation"` +} + +type NameIdentifier struct { + Text string `xml:",chardata"` + Format string `xml:"Format,attr"` +} + +type SubjectConfirmation struct { + Text string `xml:",chardata"` + ConfirmationMethod Text `xml:"ConfirmationMethod"` +} + +type Attribute struct { + Text string `xml:",chardata"` + AttributeName string `xml:"AttributeName,attr"` + AttributeNamespace string `xml:"AttributeNamespace,attr"` + AttributeValue Text `xml:"AttributeValue"` +} + +type AuthenticationStatement struct { + Text string `xml:",chardata"` + AuthenticationMethod string `xml:"AuthenticationMethod,attr"` + AuthenticationInstant string `xml:"AuthenticationInstant,attr"` + Subject Subject `xml:"Subject"` +} + +type Signature struct { + Text string `xml:",chardata"` + Ds string `xml:"ds,attr"` + SignedInfo SignedInfo `xml:"SignedInfo"` + SignatureValue Text `xml:"SignatureValue"` + KeyInfo KeyInfo `xml:"KeyInfo"` +} + +type SignedInfo struct { + Text string `xml:",chardata"` + CanonicalizationMethod Method `xml:"CanonicalizationMethod"` + SignatureMethod Method `xml:"SignatureMethod"` + Reference Reference `xml:"Reference"` +} + +type Method struct { + Text string `xml:",chardata"` + Algorithm string `xml:"Algorithm,attr"` +} + +type Reference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` + Transforms Transforms `xml:"Transforms"` + DigestMethod Method `xml:"DigestMethod"` + DigestValue Text `xml:"DigestValue"` +} + +type Transforms struct { + Text string `xml:",chardata"` + Transform []Method `xml:"Transform"` +} + +type KeyInfo struct { + Text string `xml:",chardata"` + Xmlns string `xml:"xmlns,attr"` + X509Data X509Data `xml:"X509Data"` +} + +type X509Data struct { + Text string `xml:",chardata"` + X509Certificate Text `xml:"X509Certificate"` +} + +type RequestedAttachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} + +type SecurityTokenReference struct { + Text string `xml:",chardata"` + TokenType string `xml:"TokenType,attr"` + O string `xml:"o,attr"` + K string `xml:"k,attr"` + KeyIdentifier KeyIdentifier `xml:"KeyIdentifier"` +} + +type KeyIdentifier struct { + Text string `xml:",chardata"` + ValueType string `xml:"ValueType,attr"` +} + +type RequestedUnattachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go new file mode 100644 index 00000000..6fe5efa8 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=Version"; DO NOT EDIT. + +package defs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TrustUnknown-0] + _ = x[Trust2005-1] + _ = x[Trust13-2] +} + +const _Version_name = "TrustUnknownTrust2005Trust13" + +var _Version_index = [...]uint8{0, 12, 21, 28} + +func (i Version) String() string { + if i < 0 || i >= Version(len(_Version_index)-1) { + return "Version(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Version_name[_Version_index[i]:_Version_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go new file mode 100644 index 00000000..8fad5efb --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go @@ -0,0 +1,199 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import ( + "encoding/xml" + "fmt" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + uuid "github.com/google/uuid" +) + +//go:generate stringer -type=Version + +type Version int + +const ( + TrustUnknown Version = iota + Trust2005 + Trust13 +) + +// Endpoint represents a WSTrust endpoint. +type Endpoint struct { + // Version is the version of the endpoint. + Version Version + // URL is the URL of the endpoint. 
+ URL string +} + +type wsTrustTokenRequestEnvelope struct { + XMLName xml.Name `xml:"s:Envelope"` + Text string `xml:",chardata"` + S string `xml:"xmlns:s,attr"` + Wsa string `xml:"xmlns:wsa,attr"` + Wsu string `xml:"xmlns:wsu,attr"` + Header struct { + Text string `xml:",chardata"` + Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:Action"` + MessageID struct { + Text string `xml:",chardata"` + } `xml:"wsa:messageID"` + ReplyTo struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:ReplyTo"` + To struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:To"` + Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + Wsse string `xml:"xmlns:wsse,attr"` + Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Created struct { + Text string `xml:",chardata"` + } `xml:"wsu:Created"` + Expires struct { + Text string `xml:",chardata"` + } `xml:"wsu:Expires"` + } `xml:"wsu:Timestamp"` + UsernameToken struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Username struct { + Text string `xml:",chardata"` + } `xml:"wsse:Username"` + Password struct { + Text string `xml:",chardata"` + } `xml:"wsse:Password"` + } `xml:"wsse:UsernameToken"` + } `xml:"wsse:Security"` + } `xml:"s:Header"` + Body struct { + Text string `xml:",chardata"` + RequestSecurityToken struct { + Text string `xml:",chardata"` + Wst string `xml:"xmlns:wst,attr"` + AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"xmlns:wsp,attr"` + EndpointReference struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:EndpointReference"` + } `xml:"wsp:AppliesTo"` + KeyType struct { + Text string `xml:",chardata"` + } `xml:"wst:KeyType"` + RequestType struct { + Text string `xml:",chardata"` + } `xml:"wst:RequestType"` + } `xml:"wst:RequestSecurityToken"` + } `xml:"s:Body"` +} + +func buildTimeString(t time.Time) string { + // Golang time formats are weird: https://stackoverflow.com/questions/20234104/how-to-format-current-time-using-a-yyyymmddhhmmss-format + return t.Format("2006-01-02T15:04:05.000Z") +} + +func (wte *Endpoint) buildTokenRequestMessage(authType authority.AuthorizeType, cloudAudienceURN string, username string, password string) (string, error) { + var soapAction string + var trustNamespace string + var keyType string + var requestType string + + createdTime := time.Now().UTC() + expiresTime := createdTime.Add(10 * time.Minute) + + switch wte.Version { + case Trust2005: + soapAction = trust2005Spec + trustNamespace = "http://schemas.xmlsoap.org/ws/2005/02/trust" + keyType = "http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey" + requestType = "http://schemas.xmlsoap.org/ws/2005/02/trust/Issue" + case Trust13: + soapAction = trust13Spec + trustNamespace = "http://docs.oasis-open.org/ws-sx/ws-trust/200512" + keyType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Bearer" + requestType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Issue" + default: + return "", fmt.Errorf("buildTokenRequestMessage had Version == %q, which is not recognized", wte.Version) + } + + var envelope wsTrustTokenRequestEnvelope + + messageUUID := uuid.New() + + envelope.S = "http://www.w3.org/2003/05/soap-envelope" + envelope.Wsa = 
"http://www.w3.org/2005/08/addressing" + envelope.Wsu = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd" + + envelope.Header.Action.MustUnderstand = "1" + envelope.Header.Action.Text = soapAction + envelope.Header.MessageID.Text = "urn:uuid:" + messageUUID.String() + envelope.Header.ReplyTo.Address.Text = "http://www.w3.org/2005/08/addressing/anonymous" + envelope.Header.To.MustUnderstand = "1" + envelope.Header.To.Text = wte.URL + + switch authType { + case authority.ATUnknown: + return "", fmt.Errorf("buildTokenRequestMessage had no authority type(%v)", authType) + case authority.ATUsernamePassword: + endpointUUID := uuid.New() + + var trustID string + if wte.Version == Trust2005 { + trustID = "UnPwSecTok2005-" + endpointUUID.String() + } else { + trustID = "UnPwSecTok13-" + endpointUUID.String() + } + + envelope.Header.Security.MustUnderstand = "1" + envelope.Header.Security.Wsse = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" + envelope.Header.Security.Timestamp.ID = "MSATimeStamp" + envelope.Header.Security.Timestamp.Created.Text = buildTimeString(createdTime) + envelope.Header.Security.Timestamp.Expires.Text = buildTimeString(expiresTime) + envelope.Header.Security.UsernameToken.ID = trustID + envelope.Header.Security.UsernameToken.Username.Text = username + envelope.Header.Security.UsernameToken.Password.Text = password + default: + // This is just to note that we don't do anything for other cases. + // We aren't missing anything I know of. + } + + envelope.Body.RequestSecurityToken.Wst = trustNamespace + envelope.Body.RequestSecurityToken.AppliesTo.Wsp = "http://schemas.xmlsoap.org/ws/2004/09/policy" + envelope.Body.RequestSecurityToken.AppliesTo.EndpointReference.Address.Text = cloudAudienceURN + envelope.Body.RequestSecurityToken.KeyType.Text = keyType + envelope.Body.RequestSecurityToken.RequestType.Text = requestType + + output, err := xml.Marshal(envelope) + if err != nil { + return "", err + } + + return string(output), nil +} + +func (wte *Endpoint) BuildTokenRequestMessageWIA(cloudAudienceURN string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATWindowsIntegrated, cloudAudienceURN, "", "") +} + +func (wte *Endpoint) BuildTokenRequestMessageUsernamePassword(cloudAudienceURN string, username string, password string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATUsernamePassword, cloudAudienceURN, username, password) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go new file mode 100644 index 00000000..e3d19886 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go @@ -0,0 +1,159 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package defs + +import ( + "errors" + "fmt" + "strings" +) + +//go:generate stringer -type=endpointType + +type endpointType int + +const ( + etUnknown endpointType = iota + etUsernamePassword + etWindowsTransport +) + +type wsEndpointData struct { + Version Version + EndpointType endpointType +} + +const trust13Spec string = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" +const trust2005Spec string = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue" + +type MexDocument struct { + UsernamePasswordEndpoint Endpoint + WindowsTransportEndpoint Endpoint + policies map[string]endpointType + bindings map[string]wsEndpointData +} + +func updateEndpoint(cached *Endpoint, found Endpoint) { + if cached == nil || cached.Version == TrustUnknown { + *cached = found + return + } + if (*cached).Version == Trust2005 && found.Version == Trust13 { + *cached = found + return + } +} + +// TODO(msal): Someone needs to write tests for everything below. + +// NewFromDef creates a new MexDocument. +func NewFromDef(defs Definitions) (MexDocument, error) { + policies, err := policies(defs) + if err != nil { + return MexDocument{}, err + } + + bindings, err := bindings(defs, policies) + if err != nil { + return MexDocument{}, err + } + + userPass, windows, err := endpoints(defs, bindings) + if err != nil { + return MexDocument{}, err + } + + return MexDocument{ + UsernamePasswordEndpoint: userPass, + WindowsTransportEndpoint: windows, + policies: policies, + bindings: bindings, + }, nil +} + +func policies(defs Definitions) (map[string]endpointType, error) { + policies := make(map[string]endpointType, len(defs.Policy)) + + for _, policy := range defs.Policy { + if policy.ExactlyOne.All.NegotiateAuthentication.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etWindowsTransport + } + } + + if policy.ExactlyOne.All.SignedEncryptedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + if policy.ExactlyOne.All.SignedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + } + + if len(policies) == 0 { + return policies, errors.New("no policies for mex document") + } + + return policies, nil +} + +func bindings(defs Definitions, policies map[string]endpointType) (map[string]wsEndpointData, error) { + bindings := make(map[string]wsEndpointData, len(defs.Binding)) + + for _, binding := range defs.Binding { + policyName := binding.PolicyReference.URI + transport := binding.Binding.Transport + + if transport == "http://schemas.xmlsoap.org/soap/http" { + if policy, ok := policies[policyName]; ok { + bindingName := binding.Name + specVersion := binding.Operation.Operation.SoapAction + + if specVersion == trust13Spec { + bindings[bindingName] = wsEndpointData{Trust13, policy} + } else if specVersion == trust2005Spec { + bindings[bindingName] = wsEndpointData{Trust2005, policy} + } else { + return nil, errors.New("found unknown spec version in mex document") + } + } + } + } + return bindings, nil +} + +func endpoints(defs Definitions, bindings map[string]wsEndpointData) (userPass, windows Endpoint, err error) { + for _, port := range defs.Service.Port { + bindingName := port.Binding + + index := 
strings.Index(bindingName, ":") + if index != -1 { + bindingName = bindingName[index+1:] + } + + if binding, ok := bindings[bindingName]; ok { + url := strings.TrimSpace(port.EndpointReference.Address.Text) + if url == "" { + return Endpoint{}, Endpoint{}, fmt.Errorf("MexDocument cannot have blank URL endpoint") + } + if binding.Version == TrustUnknown { + return Endpoint{}, Endpoint{}, fmt.Errorf("endpoint version unknown") + } + endpoint := Endpoint{Version: binding.Version, URL: url} + + switch binding.EndpointType { + case etUsernamePassword: + updateEndpoint(&userPass, endpoint) + case etWindowsTransport: + updateEndpoint(&windows, endpoint) + default: + return Endpoint{}, Endpoint{}, errors.New("found unknown port type in MEX document") + } + } + } + return userPass, windows, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go new file mode 100644 index 00000000..47cd4c69 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package wstrust provides a client for communicating with a WSTrust (https://en.wikipedia.org/wiki/WS-Trust#:~:text=WS%2DTrust%20is%20a%20WS,in%20a%20secure%20message%20exchange.) +for the purposes of extracting metadata from the service. This data can be used to acquire +tokens using the accesstokens.Client.GetAccessTokenFromSamlGrant() call. +*/ +package wstrust + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" +) + +type xmlCaller interface { + XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error + SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error +} + +type SamlTokenInfo struct { + AssertionType string // Should be either constants SAMLV1Grant or SAMLV2Grant. + Assertion string +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm xmlCaller +} + +// TODO(msal): This allows me to call Mex without having a real Def file on line 45. +// This would fail because policies() would not find a policy. This is easy enough to +// fix in test data, but.... Definitions is defined with built in structs. That needs +// to be pulled apart and until then I have this hack in. +var newFromDef = defs.NewFromDef + +// Mex provides metadata about a wstrust service. +func (c Client) Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) { + resp := defs.Definitions{} + err := c.Comm.XMLCall( + ctx, + federationMetadataURL, + http.Header{}, + nil, + &resp, + ) + if err != nil { + return defs.MexDocument{}, err + } + + return newFromDef(resp) +} + +const ( + SoapActionDefault = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" + + // Note: Commented out because this action is not supported. 
It was in the original code + // but only used in a switch where it errored. Since there was only one value, a default + // worked better. However, buildTokenRequestMessage() had 2005 support. I'm not actually + // sure what's going on here. It looks like we have half support. For now this is here just + // for documentation purposes in case we are going to add support. + // + // SoapActionWSTrust2005 = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue" +) + +// SAMLTokenInfo provides SAML information that is used to generate a SAML token. +func (c Client) SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (SamlTokenInfo, error) { + var wsTrustRequestMessage string + var err error + + switch authParameters.AuthorizationType { + case authority.ATWindowsIntegrated: + wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageWIA(cloudAudienceURN) + if err != nil { + return SamlTokenInfo{}, err + } + case authority.ATUsernamePassword: + wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageUsernamePassword( + cloudAudienceURN, authParameters.Username, authParameters.Password) + if err != nil { + return SamlTokenInfo{}, err + } + default: + return SamlTokenInfo{}, fmt.Errorf("unknown auth type %v", authParameters.AuthorizationType) + } + + var soapAction string + switch endpoint.Version { + case defs.Trust13: + soapAction = SoapActionDefault + case defs.Trust2005: + return SamlTokenInfo{}, errors.New("WS Trust 2005 support is not implemented") + default: + return SamlTokenInfo{}, fmt.Errorf("the SOAP endpoint for a wstrust call had an invalid version: %v", endpoint.Version) + } + + resp := defs.SAMLDefinitions{} + err = c.Comm.SOAPCall(ctx, endpoint.URL, soapAction, http.Header{}, nil, wsTrustRequestMessage, &resp) + if err != nil { + return SamlTokenInfo{}, err + } + + return c.samlAssertion(resp) +} + +const ( + samlv1Assertion = "urn:oasis:names:tc:SAML:1.0:assertion" + samlv2Assertion = "urn:oasis:names:tc:SAML:2.0:assertion" +) + +func (c Client) samlAssertion(def defs.SAMLDefinitions) (SamlTokenInfo, error) { + for _, tokenResponse := range def.Body.RequestSecurityTokenResponseCollection.RequestSecurityTokenResponse { + token := tokenResponse.RequestedSecurityToken + if token.Assertion.XMLName.Local != "" { + assertion := token.AssertionRawXML + + samlVersion := token.Assertion.Saml + switch samlVersion { + case samlv1Assertion: + return SamlTokenInfo{AssertionType: grant.SAMLV1, Assertion: assertion}, nil + case samlv2Assertion: + return SamlTokenInfo{AssertionType: grant.SAMLV2, Assertion: assertion}, nil + } + return SamlTokenInfo{}, fmt.Errorf("couldn't parse SAML assertion, version unknown: %q", samlVersion) + } + } + return SamlTokenInfo{}, errors.New("unknown WS-Trust version") +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go new file mode 100644 index 00000000..0ade4117 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// TODO(msal): Write some tests. The original code this came from didn't have tests and I'm too +// tired at this point to do it. It, like much of the other *Manager code I found, was broken because +// it didn't have mutex protection.
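Before the resolver code begins, it is worth making the wstrust flow above concrete. A minimal sketch, written as if it were another file in package wstrust; the helper and its arguments are hypothetical, and comm is any implementation of the xmlCaller interface defined above:

package wstrust

import (
	"context"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
)

// samlFromUsernamePassword is a hypothetical helper showing how Mex and
// SAMLTokenInfo compose for the username/password flow.
func samlFromUsernamePassword(ctx context.Context, comm xmlCaller, ap authority.AuthParams, mexURL, urn string) (SamlTokenInfo, error) {
	c := Client{Comm: comm}
	// Fetch and parse the federation metadata (MEX) document.
	doc, err := c.Mex(ctx, mexURL)
	if err != nil {
		return SamlTokenInfo{}, err
	}
	// ap is assumed to carry authority.ATUsernamePassword plus Username and
	// Password; per SAMLTokenInfo above, only a Trust 1.3 endpoint succeeds.
	return c.SAMLTokenInfo(ctx, ap, urn, doc.UsernamePasswordEndpoint)
}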
+ +package oauth + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" +) + +// ADFS is an active directory federation service authority type. +const ADFS = "ADFS" + +type cacheEntry struct { + Endpoints authority.Endpoints + ValidForDomainsInList map[string]bool +} + +func createcacheEntry(endpoints authority.Endpoints) cacheEntry { + return cacheEntry{endpoints, map[string]bool{}} +} + +// authorityEndpoint retrieves endpoints from an authority for auth and token acquisition. +type authorityEndpoint struct { + rest *ops.REST + + mu sync.Mutex + cache map[string]cacheEntry +} + +// newAuthorityEndpoint is the constructor for authorityEndpoint. +func newAuthorityEndpoint(rest *ops.REST) *authorityEndpoint { + m := &authorityEndpoint{rest: rest, cache: map[string]cacheEntry{}} + return m +} + +// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance. +func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) { + + if endpoints, found := m.cachedEndpoints(authorityInfo, userPrincipalName); found { + return endpoints, nil + } + + endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName) + if err != nil { + return authority.Endpoints{}, err + } + + resp, err := m.rest.Authority().GetTenantDiscoveryResponse(ctx, endpoint) + if err != nil { + return authority.Endpoints{}, err + } + if err := resp.Validate(); err != nil { + return authority.Endpoints{}, fmt.Errorf("ResolveEndpoints(): %w", err) + } + + tenant := authorityInfo.Tenant + + endpoints := authority.NewEndpoints( + strings.Replace(resp.AuthorizationEndpoint, "{tenant}", tenant, -1), + strings.Replace(resp.TokenEndpoint, "{tenant}", tenant, -1), + strings.Replace(resp.Issuer, "{tenant}", tenant, -1), + authorityInfo.Host) + + m.addCachedEndpoints(authorityInfo, userPrincipalName, endpoints) + + return endpoints, nil +} + +// cachedEndpoints returns the cached endpoints if they exist. If not, we return false. +func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, bool) { + m.mu.Lock() + defer m.mu.Unlock() + + if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { + if authorityInfo.AuthorityType == ADFS { + domain, err := adfsDomainFromUpn(userPrincipalName) + if err == nil { + if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok { + return cacheEntry.Endpoints, true + } + } + } + return cacheEntry.Endpoints, true + } + return authority.Endpoints{}, false +} + +func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, userPrincipalName string, endpoints authority.Endpoints) { + m.mu.Lock() + defer m.mu.Unlock() + + updatedCacheEntry := createcacheEntry(endpoints) + + if authorityInfo.AuthorityType == ADFS { + // Since we're here, we've made a call to the backend. We want to ensure we're caching + // the latest values from the server.
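+ // Start from the domains already validated for this authority so that earlier + // ADFS lookups keep hitting the cache after the entry is replaced below.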
+ if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { + for k := range cacheEntry.ValidForDomainsInList { + updatedCacheEntry.ValidForDomainsInList[k] = true + } + } + domain, err := adfsDomainFromUpn(userPrincipalName) + if err == nil { + updatedCacheEntry.ValidForDomainsInList[domain] = true + } + } + + m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry +} + +func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) { + if authorityInfo.Tenant == "adfs" { + return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil + } else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + } else if authorityInfo.Region != "" { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + + } + + return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil +} + +func adfsDomainFromUpn(userPrincipalName string) (string, error) { + parts := strings.Split(userPrincipalName, "@") + if len(parts) < 2 { + return "", errors.New("no @ present in user principal name") + } + return parts[1], nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go new file mode 100644 index 00000000..4561d72d --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go @@ -0,0 +1,52 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package options + +import ( + "errors" + "fmt" +) + +// CallOption implements an optional argument to a method call. See +// https://blog.devgenius.io/go-call-option-that-can-be-used-with-multiple-methods-6c81734f3dbe +// for an explanation of the usage pattern. +type CallOption interface { + Do(any) error + callOption() +} + +// ApplyOptions applies all the callOptions to options. options must be a pointer to a struct and +// callOptions must be a list of objects that implement CallOption. +func ApplyOptions[O, C any](options O, callOptions []C) error { + for _, o := range callOptions { + if t, ok := any(o).(CallOption); !ok { + return fmt.Errorf("unexpected option type %T", o) + } else if err := t.Do(options); err != nil { + return err + } + } + return nil +} + +// NewCallOption returns a new CallOption whose Do() method calls function "f". +func NewCallOption(f func(any) error) CallOption { + if f == nil { + // This isn't a practical concern because only an MSAL maintainer can get + // us here, by implementing a do-nothing option. But if someone does that, + // the below ensures the method invoked with the option returns an error. 
+ return callOption(func(any) error { + return errors.New("invalid option: missing implementation") + }) + } + return callOption(f) +} + +// callOption is an adapter for a function to a CallOption +type callOption func(any) error + +func (c callOption) Do(a any) error { + return c(a) +} + +func (callOption) callOption() {} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go new file mode 100644 index 00000000..f7e12a71 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go @@ -0,0 +1,71 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package shared + +import ( + "net/http" + "reflect" + "strings" +) + +const ( + // CacheKeySeparator is used in creating the keys of the cache. + CacheKeySeparator = "-" +) + +type Account struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + LocalAccountID string `json:"local_account_id,omitempty"` + AuthorityType string `json:"authority_type,omitempty"` + PreferredUsername string `json:"username,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + MiddleName string `json:"middle_name,omitempty"` + Name string `json:"name,omitempty"` + AlternativeID string `json:"alternative_account_id,omitempty"` + RawClientInfo string `json:"client_info,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccount creates an account. +func NewAccount(homeAccountID, env, realm, localAccountID, authorityType, username string) Account { + return Account{ + HomeAccountID: homeAccountID, + Environment: env, + Realm: realm, + LocalAccountID: localAccountID, + AuthorityType: authorityType, + PreferredUsername: username, + } +} + +// Key creates the key for storing accounts in the cache. +func (acc Account) Key() string { + return strings.Join([]string{acc.HomeAccountID, acc.Environment, acc.Realm}, CacheKeySeparator) +} + +// IsZero checks the zero value of account. +func (acc Account) IsZero() bool { + v := reflect.ValueOf(acc) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// DefaultClient is our default shared HTTP client. +var DefaultClient = &http.Client{} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go new file mode 100644 index 00000000..b76c0c56 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go @@ -0,0 +1,8 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package version keeps the version number of the client package. +package version + +// Version is the version of this client package that is communicated to the server. 
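The CallOption/ApplyOptions pair above is the pattern all the per-call options in this patch build on. A minimal sketch of the wiring, using hypothetical Foo names, written as if in another package in this module that imports the internal options package:

package foo

import (
	"fmt"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options"
)

type fooOptions struct{ retries int }

// FooOption marks the options accepted by Foo.
type FooOption interface{ fooOption() }

// WithRetries is usable by any method whose option interface it embeds.
func WithRetries(n int) interface {
	FooOption
	options.CallOption
} {
	return struct {
		FooOption
		options.CallOption
	}{
		CallOption: options.NewCallOption(func(a any) error {
			switch t := a.(type) {
			case *fooOptions:
				t.retries = n
			default:
				return fmt.Errorf("unexpected options type %T", a)
			}
			return nil
		}),
	}
}

// Foo applies its options; an option aimed at some other method's options
// struct surfaces as the "unexpected options type" error above.
func Foo(opts ...FooOption) error {
	o := fooOptions{}
	if err := options.ApplyOptions(&o, opts); err != nil {
		return err
	}
	_ = o.retries // use the configured value
	return nil
}

This mirrors how WithClaims and WithTenantID in public.go below fan one option out across several methods' option structs.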
+const Version = "1.0.0" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go new file mode 100644 index 00000000..cce05277 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -0,0 +1,683 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package public provides a client for authentication of "public" applications. A "public" +application is defined as an app that runs on client devices (Android, iOS, Windows, Linux, ...). +These devices are "untrusted" and access resources via web APIs that must authenticate. +*/ +package public + +/* +Design note: + +public.Client uses client.Base as an embedded type. client.Base statically assigns its attributes +during creation. As it doesn't have any pointers in it, anything borrowed from it, such as +Base.AuthParams, is a copy that is free to be manipulated here. +*/ + +// TODO(msal): This should have example code for each method on client using Go's example doc framework. +// Basic usage details should be included in the package documentation. + +import ( + "context" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/url" + "strconv" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" + "github.com/google/uuid" + "github.com/pkg/browser" +) + +// AuthResult contains the results of one token acquisition operation. +// For details see https://aka.ms/msal-net-authenticationresult +type AuthResult = base.AuthResult + +type Account = shared.Account + +// clientOptions configures the Client's behavior. +type clientOptions struct { + accessor cache.ExportReplace + authority string + capabilities []string + disableInstanceDiscovery bool + httpClient ops.HTTPClient +} + +func (p *clientOptions) validate() error { + u, err := url.Parse(p.authority) + if err != nil { + return fmt.Errorf("Authority options cannot be URL parsed: %w", err) + } + if u.Scheme != "https" { + return fmt.Errorf("Authority(%s) did not start with https://", u.String()) + } + return nil +} + +// Option is an optional argument to the New constructor. +type Option func(o *clientOptions) + +// WithAuthority allows for a custom authority to be set. This must be a valid https URL. +func WithAuthority(authority string) Option { + return func(o *clientOptions) { + o.authority = authority + } +} + +// WithCache provides an accessor that will read and write authentication data to an externally managed cache.
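+// +// For example (a sketch; myAccessor is a hypothetical cache.ExportReplace implementation): +// +//	client, err := New("client-id", WithCache(myAccessor)) +//	if err != nil { +//		// handle the error +//	}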
+func WithCache(accessor cache.ExportReplace) Option { + return func(o *clientOptions) { + o.accessor = accessor + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1". +func WithClientCapabilities(capabilities []string) Option { + return func(o *clientOptions) { + // there's no danger of sharing the slice's underlying memory with the application because + // this slice is simply passed to base.WithClientCapabilities, which copies its data + o.capabilities = capabilities + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) Option { + return func(o *clientOptions) { + o.httpClient = httpClient + } +} + +// WithInstanceDiscovery, when passed false, disables authority validation (to support private cloud scenarios). +func WithInstanceDiscovery(enabled bool) Option { + return func(o *clientOptions) { + o.disableInstanceDiscovery = !enabled + } +} + +// Client is a representation of an authentication client for public applications as defined in the +// package doc. For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications. +type Client struct { + base base.Client +} + +// New is the constructor for Client. +func New(clientID string, options ...Option) (Client, error) { + opts := clientOptions{ + authority: base.AuthorityPublicCloud, + httpClient: shared.DefaultClient, + } + + for _, o := range options { + o(&opts) + } + if err := opts.validate(); err != nil { + return Client{}, err + } + + base, err := base.New(clientID, opts.authority, oauth.New(opts.httpClient), base.WithCacheAccessor(opts.accessor), base.WithClientCapabilities(opts.capabilities), base.WithInstanceDiscovery(!opts.disableInstanceDiscovery)) + if err != nil { + return Client{}, err + } + return Client{base}, nil +} + +// authCodeURLOptions contains options for AuthCodeURL +type authCodeURLOptions struct { + claims, loginHint, tenantID, domainHint string +} + +// AuthCodeURLOption is implemented by options for AuthCodeURL +type AuthCodeURLOption interface { + authCodeURLOption() +} + +// AuthCodeURL creates a URL used to acquire an authorization code. +// +// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID] +func (pca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) { + o := authCodeURLOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return "", err + } + ap, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return "", err + } + ap.Claims = o.claims + ap.LoginHint = o.loginHint + ap.DomainHint = o.domainHint + return pca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap) +} + +// WithClaims sets additional claims to request for the token, such as those required by conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded. +// This option is valid for any token acquisition method.
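+// +// For example, after Azure AD returns a claims challenge (a sketch; ctx, scopes, account, and the decoded challenge string are assumed to be in scope): +// +//	result, err := client.AcquireTokenSilent(ctx, scopes, WithSilentAccount(account), WithClaims(decodedChallenge))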
+func WithClaims(claims string) interface { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.claims = claims + case *acquireTokenByDeviceCodeOptions: + t.claims = claims + case *acquireTokenByUsernamePasswordOptions: + t.claims = claims + case *acquireTokenSilentOptions: + t.claims = claims + case *authCodeURLOptions: + t.claims = claims + case *interactiveAuthOptions: + t.claims = claims + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New] by [WithAuthority]. +// This option is valid for any token acquisition method. +func WithTenantID(tenantID string) interface { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.tenantID = tenantID + case *acquireTokenByDeviceCodeOptions: + t.tenantID = tenantID + case *acquireTokenByUsernamePasswordOptions: + t.tenantID = tenantID + case *acquireTokenSilentOptions: + t.tenantID = tenantID + case *authCodeURLOptions: + t.tenantID = tenantID + case *interactiveAuthOptions: + t.tenantID = tenantID + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// acquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. +// These are set by using various AcquireTokenSilentOption functions. +type acquireTokenSilentOptions struct { + account Account + claims, tenantID string +} + +// AcquireSilentOption is implemented by options for AcquireTokenSilent +type AcquireSilentOption interface { + acquireSilentOption() +} + +// WithSilentAccount uses the passed account during an AcquireTokenSilent() call. +func WithSilentAccount(account Account) interface { + AcquireSilentOption + options.CallOption +} { + return struct { + AcquireSilentOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.account = account + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenSilent acquires a token from either the cache or using a refresh token. 
+// +// Options: [WithClaims], [WithSilentAccount], [WithTenantID] +func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) { + o := acquireTokenSilentOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + silentParameters := base.AcquireTokenSilentParameters{ + Scopes: scopes, + Account: o.account, + Claims: o.claims, + RequestType: accesstokens.ATPublic, + IsAppCache: false, + TenantID: o.tenantID, + } + + return pca.base.AcquireTokenSilent(ctx, silentParameters) +} + +// acquireTokenByUsernamePasswordOptions contains optional configuration for AcquireTokenByUsernamePassword +type acquireTokenByUsernamePasswordOptions struct { + claims, tenantID string +} + +// AcquireByUsernamePasswordOption is implemented by options for AcquireTokenByUsernamePassword +type AcquireByUsernamePasswordOption interface { + acquireByUsernamePasswordOption() +} + +// AcquireTokenByUsernamePassword acquires a security token from the authority, via Username/Password Authentication. +// NOTE: this flow is NOT recommended. +// +// Options: [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username, password string, opts ...AcquireByUsernamePasswordOption) (AuthResult, error) { + o := acquireTokenByUsernamePasswordOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATUsernamePassword + authParams.Claims = o.claims + authParams.Username = username + authParams.Password = password + + token, err := pca.base.Token.UsernamePassword(ctx, authParams) + if err != nil { + return AuthResult{}, err + } + return pca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +type DeviceCodeResult = accesstokens.DeviceCodeResult + +// DeviceCode provides the results of the device code flow's first stage (containing the code) +// that must be entered on the second device and provides a method to retrieve the AuthenticationResult +// once that code has been entered and verified. +type DeviceCode struct { + // Result holds the information about the device code (such as the code). + Result DeviceCodeResult + + authParams authority.AuthParams + client Client + dc oauth.DeviceCode +} + +// AuthenticationResult retrieves the AuthenticationResult once the user enters the code +// on the second device. Until then, it blocks until the .AcquireTokenByDeviceCode() context +// is cancelled or the token expires. +func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error) { + token, err := d.dc.Token(ctx) + if err != nil { + return AuthResult{}, err + } + return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true) +} + +// acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode +type acquireTokenByDeviceCodeOptions struct { + claims, tenantID string +} + +// AcquireByDeviceCodeOption is implemented by options for AcquireTokenByDeviceCode +type AcquireByDeviceCodeOption interface { + acquireByDeviceCodeOptions() +} + +// AcquireTokenByDeviceCode acquires a security token from the authority, by acquiring a device code and using that to acquire the token. +// Show the returned DeviceCode's Result to the user, then call its AuthenticationResult method to wait for the token.
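+// +// A sketch of the flow (Result.Message carries the user-facing instructions): +// +//	dc, err := client.AcquireTokenByDeviceCode(ctx, scopes) +//	if err != nil { +//		// handle the error +//	} +//	fmt.Println(dc.Result.Message) // tell the user where to enter the code +//	result, err := dc.AuthenticationResult(ctx) // blocks until entered or ctx is done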
+// +// Options: [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByDeviceCode(ctx context.Context, scopes []string, opts ...AcquireByDeviceCodeOption) (DeviceCode, error) { + o := acquireTokenByDeviceCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return DeviceCode{}, err + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return DeviceCode{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATDeviceCode + authParams.Claims = o.claims + + dc, err := pca.base.Token.DeviceCode(ctx, authParams) + if err != nil { + return DeviceCode{}, err + } + + return DeviceCode{Result: dc.Result, authParams: authParams, client: pca, dc: dc}, nil +} + +// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow. +type acquireTokenByAuthCodeOptions struct { + challenge, claims, tenantID string +} + +// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode +type AcquireByAuthCodeOption interface { + acquireByAuthCodeOption() +} + +// WithChallenge allows you to provide a code for the .AcquireTokenByAuthCode() call. +func WithChallenge(challenge string) interface { + AcquireByAuthCodeOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.challenge = challenge + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code. +// The specified redirect URI must be the same URI that was used when the authorization code was requested. +// +// Options: [WithChallenge], [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) { + o := acquireTokenByAuthCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + params := base.AcquireTokenAuthCodeParameters{ + Scopes: scopes, + Code: code, + Challenge: o.challenge, + Claims: o.claims, + AppType: accesstokens.ATPublic, + RedirectURI: redirectURI, + TenantID: o.tenantID, + } + + return pca.base.AcquireTokenByAuthCode(ctx, params) +} + +// Accounts gets all the accounts in the token cache. +// If there are no accounts in the cache the returned slice is empty. +func (pca Client) Accounts(ctx context.Context) ([]Account, error) { + return pca.base.AllAccounts(ctx) +} + +// RemoveAccount signs the account out and forgets account from token cache. +func (pca Client) RemoveAccount(ctx context.Context, account Account) error { + return pca.base.RemoveAccount(ctx, account) +} + +// interactiveAuthOptions contains the optional parameters used to acquire an access token for interactive auth code flow. +type interactiveAuthOptions struct { + claims, domainHint, loginHint, redirectURI, tenantID string +} + +// AcquireInteractiveOption is implemented by options for AcquireTokenInteractive +type AcquireInteractiveOption interface { + acquireInteractiveOption() +} + +// WithLoginHint pre-populates the login prompt with a username. 
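+// +// For example (a sketch; the username is hypothetical): +// +//	result, err := client.AcquireTokenInteractive(ctx, scopes, WithLoginHint("user@contoso.com"))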
+func WithLoginHint(username string) interface { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.loginHint = username + case *interactiveAuthOptions: + t.loginHint = username + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url. +func WithDomainHint(domain string) interface { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.domainHint = domain + case *interactiveAuthOptions: + t.domainHint = domain + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithRedirectURI sets a port for the local server used in interactive authentication, for +// example http://localhost:port. All URI components other than the port are ignored. +func WithRedirectURI(redirectURI string) interface { + AcquireInteractiveOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *interactiveAuthOptions: + t.redirectURI = redirectURI + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenInteractive acquires a security token from the authority using the default web browser to select the account. +// https://docs.microsoft.com/en-us/azure/active-directory/develop/msal-authentication-flows#interactive-and-non-interactive-authentication +// +// Options: [WithDomainHint], [WithLoginHint], [WithRedirectURI], [WithTenantID] +func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, opts ...AcquireInteractiveOption) (AuthResult, error) { + o := interactiveAuthOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + // the code verifier is a random 32-byte sequence that's been base-64 encoded without padding. 
+ // it's used to prevent MitM attacks during auth code flow, see https://tools.ietf.org/html/rfc7636 + cv, challenge, err := codeVerifier() + if err != nil { + return AuthResult{}, err + } + var redirectURL *url.URL + if o.redirectURI != "" { + redirectURL, err = url.Parse(o.redirectURI) + if err != nil { + return AuthResult{}, err + } + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATInteractive + authParams.Claims = o.claims + authParams.CodeChallenge = challenge + authParams.CodeChallengeMethod = "S256" + authParams.LoginHint = o.loginHint + authParams.DomainHint = o.domainHint + authParams.State = uuid.New().String() + authParams.Prompt = "select_account" + res, err := pca.browserLogin(ctx, redirectURL, authParams) + if err != nil { + return AuthResult{}, err + } + authParams.Redirecturi = res.redirectURI + + req, err := accesstokens.NewCodeChallengeRequest(authParams, accesstokens.ATPublic, nil, res.authCode, cv) + if err != nil { + return AuthResult{}, err + } + + token, err := pca.base.Token.AuthCode(ctx, req) + if err != nil { + return AuthResult{}, err + } + + return pca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +type interactiveAuthResult struct { + authCode string + redirectURI string +} + +// provides a test hook to simulate opening a browser +var browserOpenURL = func(authURL string) error { + return browser.OpenURL(authURL) +} + +// parses the port number from the provided URL. +// returns 0 if nil or no port is specified. +func parsePort(u *url.URL) (int, error) { + if u == nil { + return 0, nil + } + p := u.Port() + if p == "" { + return 0, nil + } + return strconv.Atoi(p) +} + +// browserLogin launches the system browser for interactive login +func (pca Client) browserLogin(ctx context.Context, redirectURI *url.URL, params authority.AuthParams) (interactiveAuthResult, error) { + // start local redirect server so login can call us back + port, err := parsePort(redirectURI) + if err != nil { + return interactiveAuthResult{}, err + } + srv, err := local.New(params.State, port) + if err != nil { + return interactiveAuthResult{}, err + } + defer srv.Shutdown() + params.Scopes = accesstokens.AppendDefaultScopes(params) + authURL, err := pca.base.AuthCodeURL(ctx, params.ClientID, srv.Addr, params.Scopes, params) + if err != nil { + return interactiveAuthResult{}, err + } + // open browser window so user can select credentials + if err := browserOpenURL(authURL); err != nil { + return interactiveAuthResult{}, err + } + // now wait until the logic calls us back + res := srv.Result(ctx) + if res.Err != nil { + return interactiveAuthResult{}, res.Err + } + return interactiveAuthResult{ + authCode: res.Code, + redirectURI: srv.Addr, + }, nil +} + +// creates a code verifier string along with its SHA256 hash which +// is used as the challenge when requesting an auth code. +// used in interactive auth flow for PKCE. 
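+// Concretely (RFC 7636, method "S256"): code_challenge = base64url(SHA256(code_verifier)), +// unpadded, which is what the RawURLEncoding calls below produce.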
+func codeVerifier() (codeVerifier string, challenge string, err error) { + cvBytes := make([]byte, 32) + if _, err = rand.Read(cvBytes); err != nil { + return + } + codeVerifier = base64.RawURLEncoding.EncodeToString(cvBytes) + // for PKCE, create a hash of the code verifier + cvh := sha256.Sum256([]byte(codeVerifier)) + challenge = base64.RawURLEncoding.EncodeToString(cvh[:]) + return +} diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml new file mode 100644 index 00000000..4025e01e --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 + - tip + +script: + - go test -v + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md new file mode 100644 index 00000000..d700ec47 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/CHANGELOG.md @@ -0,0 +1,8 @@ +# 1.0.1 (2017-05-31) + +## Fixed +- #21: Fix generation of alphanumeric strings (thanks @dbarranco) + +# 1.0.0 (2014-04-30) + +- Initial release. diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md new file mode 100644 index 00000000..163ffe72 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/README.md @@ -0,0 +1,70 @@ +GoUtils +=========== +[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) +[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) + + +GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some +string manipulation libraries of Java Apache Commons. 
GoUtils includes the following Java Apache Commons classes: +* WordUtils +* RandomStringUtils +* StringUtils (partial implementation) + +## Installation +If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: + + go get github.com/Masterminds/goutils + +If you do not have Go set up on your system, please follow the [Go installation directions from the documentation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. + + +## Documentation +GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) + + +## Usage +The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). + + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + } +Some functions return errors mainly due to illegal arguments used as parameters. The code example below illustrates how to deal with a function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file). + + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + + } + +## License +GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license. + +## Issue Reporting +Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues + +## Website +* [GoUtils webpage](http://Masterminds.github.io/goutils/) diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml new file mode 100644 index 00000000..657564a8 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/appveyor.yml @@ -0,0 +1,21 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\goutils +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +build: off + +install: + - go version + - go env + +test_script: + - go test -v + +deploy: off diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go new file mode 100644 index 00000000..8dbd9248 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go @@ -0,0 +1,230 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "unicode" +) + +/* +CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomNonAlphaNumeric(count int) (string, error) { + return CryptoRandomAlphaNumericCustom(count, false, false) +} + +/* +CryptoRandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAscii(count int) (string, error) { + return CryptoRandom(count, 32, 127, false, false) +} + +/* +CryptoRandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomNumeric(count int) (string, error) { + return CryptoRandom(count, 0, 0, false, true) +} + +/* +CryptoRandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alphabetic characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphabetic(count int) (string, error) { + return CryptoRandom(count, 0, 0, true, false) +} + +/* +CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphaNumeric(count int) (string, error) { + return CryptoRandom(count, 0, 0, true, true) +} + +/* +CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/ +func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { + return CryptoRandom(count, 0, 0, letters, numbers) +} + +/* +CryptoRandom creates a random string based on a variety of options, using Go's crypto/rand source of randomness. +If the parameters start and end are both 0, they are set to ' ' and 'z' respectively, and the set of ASCII printable +characters will be used, unless letters and numbers are both false, in which case start and end are set to 0 and +math.MaxInt32, respectively. +If chars is not nil, characters stored in chars that are between start and end are chosen. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode int) to start at + end - the position in set of chars (ASCII/Unicode int) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(getCryptoRandomInt(gap) + int64(start)) + } else { + ch = chars[getCryptoRandomInt(gap)+int64(start)] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + getCryptoRandomInt(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + getCryptoRandomInt(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} + +func getCryptoRandomInt(count int) int64 { + nBig, err := 
+	nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count)))
+	if err != nil {
+		panic(err)
+	}
+	return nBig.Int64()
+}
diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go
new file mode 100644
index 00000000..27267023
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/randomstringutils.go
@@ -0,0 +1,248 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package goutils
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+	"unicode"
+)
+
+// RANDOM is a *rand.Rand instance seeded with the current time, used to generate random numbers
+var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+/*
+RandomNonAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)).
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomNonAlphaNumeric(count int) (string, error) {
+	return RandomAlphaNumericCustom(count, false, false)
+}
+
+/*
+RandomAscii creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive).
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAscii(count int) (string, error) {
+	return Random(count, 32, 127, false, false)
+}
+
+/*
+RandomNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of numeric characters.
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomNumeric(count int) (string, error) {
+	return Random(count, 0, 0, false, true)
+}
+
+/*
+RandomAlphabetic creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alphabetic characters.
+
+Parameters:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAlphabetic(count int) (string, error) {
+	return Random(count, 0, 0, true, false)
+}
+
+/*
+RandomAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters.
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
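+
+Example (an illustrative usage sketch, not part of the original documentation):
+
+	s, err := RandomAlphaNumeric(16) // sixteen random letters and/or digits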
+*/
+func RandomAlphaNumeric(count int) (string, error) {
+	return Random(count, 0, 0, true, true)
+}
+
+/*
+RandomAlphaNumericCustom creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
+
+Parameters:
+	count - the length of random string to create
+	letters - if true, generated string may include alphabetic characters
+	numbers - if true, generated string may include numeric characters
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) {
+	return Random(count, 0, 0, letters, numbers)
+}
+
+/*
+Random creates a random string based on a variety of options, using the default source of randomness.
+This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []rune, *rand.Rand), but
+instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance.
+
+Parameters:
+	count - the length of random string to create
+	start - the position in set of chars (ASCII/Unicode int) to start at
+	end - the position in set of chars (ASCII/Unicode int) to end before
+	letters - if true, generated string may include alphabetic characters
+	numbers - if true, generated string may include numeric characters
+	chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) {
+	return RandomSeed(count, start, end, letters, numbers, chars, RANDOM)
+}
+
+/*
+RandomSeed creates a random string based on a variety of options, using a supplied source of randomness.
+If the parameters start and end are both 0, start and end are set to ' ' and 'z' respectively, so the ASCII printable characters will be used,
+unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively.
+If chars is not nil, characters stored in chars that are between start and end are chosen.
+This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance
+with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably.
+
+Parameters:
+	count - the length of random string to create
+	start - the position in set of chars (ASCII/Unicode decimals) to start at
+	end - the position in set of chars (ASCII/Unicode decimals) to end before
+	letters - if true, generated string may include alphabetic characters
+	numbers - if true, generated string may include numeric characters
+	chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
+	random - a source of randomness.
+ +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { + + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(random.Intn(gap) + start) + } else { + ch = chars[random.Intn(gap)+start] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + random.Intn(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + random.Intn(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go new file mode 100644 index 00000000..741bb530 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/stringutils.go @@ -0,0 +1,240 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "bytes" + "fmt" + "strings" + "unicode" +) + +// Typically returned by functions where a searched item cannot be found +const INDEX_NOT_FOUND = -1 + +/* +Abbreviate abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "Now is the time for..." 
+
+Specifically, the algorithm is as follows:
+
+  - If str is less than maxWidth characters long, return it.
+  - Else abbreviate it to (str[0:maxWidth - 3] + "...").
+  - If maxWidth is less than 4, return an illegal argument error.
+  - In no case will it return a string of length greater than maxWidth.
+
+Parameters:
+	str - the string to check
+	maxWidth - maximum length of result string, must be at least 4
+
+Returns:
+	string - abbreviated string
+	error - if the width is too small
+*/
+func Abbreviate(str string, maxWidth int) (string, error) {
+	return AbbreviateFull(str, 0, maxWidth)
+}
+
+/*
+AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..."
+This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not
+necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear
+somewhere in the result.
+In no case will it return a string of length greater than maxWidth.
+
+Parameters:
+	str - the string to check
+	offset - left edge of source string
+	maxWidth - maximum length of result string, must be at least 4
+
+Returns:
+	string - abbreviated string
+	error - if the width is too small
+*/
+func AbbreviateFull(str string, offset int, maxWidth int) (string, error) {
+	if str == "" {
+		return "", nil
+	}
+	if maxWidth < 4 {
+		err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4")
+		return "", err
+	}
+	if len(str) <= maxWidth {
+		return str, nil
+	}
+	if offset > len(str) {
+		offset = len(str)
+	}
+	if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7
+		offset = len(str) - (maxWidth - 3)
+	}
+	abrevMarker := "..."
+	if offset <= 4 {
+		return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker;
+	}
+	if maxWidth < 7 {
+		err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7")
+		return "", err
+	}
+	if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15
+		abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3))
+		return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3);
+	}
+	return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3));
+}
+
+/*
+DeleteWhiteSpace deletes all whitespace from a string as defined by unicode.IsSpace(rune).
+It returns the string without whitespace.
+
+Parameter:
+	str - the string to delete whitespace from, may be empty
+
+Returns:
+	the string without whitespace
+*/
+func DeleteWhiteSpace(str string) string {
+	if str == "" {
+		return str
+	}
+	sz := len(str)
+	var chs bytes.Buffer
+	count := 0
+	for i := 0; i < sz; i++ {
+		ch := rune(str[i])
+		if !unicode.IsSpace(ch) {
+			chs.WriteRune(ch)
+			count++
+		}
+	}
+	if count == sz {
+		return str
+	}
+	return chs.String()
+}
+
+/*
+IndexOfDifference compares two strings, and returns the index at which the strings begin to differ.
+
+Parameters:
+	str1 - the first string
+	str2 - the second string
+
+Returns:
+	the index where str1 and str2 begin to differ; -1 if they are equal
+*/
+func IndexOfDifference(str1 string, str2 string) int {
+	if str1 == str2 {
+		return INDEX_NOT_FOUND
+	}
+	if IsEmpty(str1) || IsEmpty(str2) {
+		return 0
+	}
+	var i int
+	for i = 0; i < len(str1) && i < len(str2); i++ {
+		if rune(str1[i]) != rune(str2[i]) {
+			break
+		}
+	}
+	if i < len(str2) || i < len(str1) {
+		return i
+	}
+	return INDEX_NOT_FOUND
+}
+
+/*
+IsBlank checks if a string is whitespace or empty (""). Observe the following behavior:
+
+	goutils.IsBlank("") = true
+	goutils.IsBlank(" ") = true
+	goutils.IsBlank("bob") = false
+	goutils.IsBlank(" bob ") = false
+
+Parameter:
+	str - the string to check
+
+Returns:
+	true - if the string is whitespace or empty ("")
+*/
+func IsBlank(str string) bool {
+	strLen := len(str)
+	if str == "" || strLen == 0 {
+		return true
+	}
+	for i := 0; i < strLen; i++ {
+		if unicode.IsSpace(rune(str[i])) == false {
+			return false
+		}
+	}
+	return true
+}
+
+/*
+IndexOf returns the index of the first instance of sub in str, with the search beginning from the
+index start point specified. -1 is returned if sub is not present in str.
+
+An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero.
+A start position greater than the string length returns -1.
+
+Parameters:
+	str - the string to check
+	sub - the substring to find
+	start - the start position; negative treated as zero
+
+Returns:
+	the first index where the sub string was found (always >= start)
+*/
+func IndexOf(str string, sub string, start int) int {
+
+	if start < 0 {
+		start = 0
+	}
+
+	if len(str) < start {
+		return INDEX_NOT_FOUND
+	}
+
+	if IsEmpty(str) || IsEmpty(sub) {
+		return INDEX_NOT_FOUND
+	}
+
+	partialIndex := strings.Index(str[start:len(str)], sub)
+	if partialIndex == -1 {
+		return INDEX_NOT_FOUND
+	}
+	return partialIndex + start
+}
+
+// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise.
+func IsEmpty(str string) bool {
+	return len(str) == 0
+}
+
+// DefaultString returns either the passed in string, or if the string is empty, the value of defaultStr.
+func DefaultString(str string, defaultStr string) string {
+	if IsEmpty(str) {
+		return defaultStr
+	}
+	return str
+}
+
+// DefaultIfBlank returns either the passed in string, or if the string is whitespace or empty (""), the value of defaultStr.
+func DefaultIfBlank(str string, defaultStr string) string {
+	if IsBlank(str) {
+		return defaultStr
+	}
+	return str
+}
diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go
new file mode 100644
index 00000000..034cad8e
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/wordutils.go
@@ -0,0 +1,357 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package goutils provides utility functions to manipulate strings in various ways.
+The code snippets below show examples of how to use goutils. Some functions return
+errors while others do not, so usage would vary as a result.
+
+Example:
+
+	package main
+
+	import (
+		"fmt"
+		"github.com/aokoli/goutils"
+	)
+
+	func main() {
+
+		// EXAMPLE 1: A goutils function which returns no errors
+		fmt.Println(goutils.Initials("John Doe Foo")) // Prints out "JDF"
+
+		// EXAMPLE 2: A goutils function which returns an error
+		rand1, err1 := goutils.Random(-1, 0, 0, true, true)
+
+		if err1 != nil {
+			fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...)
+		} else {
+			fmt.Println(rand1)
+		}
+	}
+*/
+package goutils
+
+import (
+	"bytes"
+	"strings"
+	"unicode"
+)
+
+// VERSION indicates the current version of goutils
+const VERSION = "1.0.0"
+
+/*
+Wrap wraps a single line of text, identifying words by ' '.
+New lines will be separated by '\n'. Very long words, such as URLs, will not be wrapped.
+Leading spaces on a new line are stripped. Trailing spaces are not stripped.
+
+Parameters:
+	str - the string to be word wrapped
+	wrapLength - the column (a column can fit only one character) to wrap the words at; less than 1 is treated as 1
+
+Returns:
+	a line with newlines inserted
+*/
+func Wrap(str string, wrapLength int) string {
+	return WrapCustom(str, wrapLength, "", false)
+}
+
+/*
+WrapCustom wraps a single line of text, identifying words by ' '.
+Leading spaces on a new line are stripped. Trailing spaces are not stripped.
+
+Parameters:
+	str - the string to be word wrapped
+	wrapLength - the column number (a column can fit only one character) to wrap the words at; less than 1 is treated as 1
+	newLineStr - the string to insert for a new line, "" uses '\n'
+	wrapLongWords - true if long words (such as URLs) should be wrapped
+
+Returns:
+	a line with newlines inserted
+*/
+func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string {
+
+	if str == "" {
+		return ""
+	}
+	if newLineStr == "" {
+		newLineStr = "\n" // TODO Assumes "\n" is separator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons
+	}
+	if wrapLength < 1 {
+		wrapLength = 1
+	}
+
+	inputLineLength := len(str)
+	offset := 0
+
+	var wrappedLine bytes.Buffer
+
+	for inputLineLength-offset > wrapLength {
+
+		if rune(str[offset]) == ' ' {
+			offset++
+			continue
+		}
+
+		end := wrapLength + offset + 1
+		spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset
+
+		if spaceToWrapAt >= offset {
+			// normal word (not longer than wrapLength)
+			wrappedLine.WriteString(str[offset:spaceToWrapAt])
+			wrappedLine.WriteString(newLineStr)
+			offset = spaceToWrapAt + 1
+
+		} else {
+			// long word or URL
+			if wrapLongWords {
+				end := wrapLength + offset
+				// long words are wrapped one line at a time
+				wrappedLine.WriteString(str[offset:end])
+				wrappedLine.WriteString(newLineStr)
+				offset += wrapLength
+			} else {
+				// long words aren't wrapped, just extended beyond limit
+				end := wrapLength + offset
+				index := strings.IndexRune(str[end:len(str)], ' ')
+				if index == -1 {
+					wrappedLine.WriteString(str[offset:len(str)])
+					offset = inputLineLength
+				} else {
+					spaceToWrapAt = index + end
+					wrappedLine.WriteString(str[offset:spaceToWrapAt])
+					wrappedLine.WriteString(newLineStr)
+					offset = spaceToWrapAt + 1
+				}
+			}
+		}
+	}
+
+	wrappedLine.WriteString(str[offset:len(str)])
+
+	return wrappedLine.String()
+
+}
+
+/*
+Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed.
+To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune).
+The delimiters represent a set of characters understood to separate words. The first string character
+and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "".
+Capitalization uses the Unicode title case, normally equivalent to upper case.
+
+Parameters:
+	str - the string to capitalize
+	delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter
+
+Returns:
+	capitalized string
+*/
+func Capitalize(str string, delimiters ...rune) string {
+
+	var delimLen int
+
+	if delimiters == nil {
+		delimLen = -1
+	} else {
+		delimLen = len(delimiters)
+	}
+
+	if str == "" || delimLen == 0 {
+		return str
+	}
+
+	buffer := []rune(str)
+	capitalizeNext := true
+	for i := 0; i < len(buffer); i++ {
+		ch := buffer[i]
+		if isDelimiter(ch, delimiters...) {
+			capitalizeNext = true
+		} else if capitalizeNext {
+			buffer[i] = unicode.ToTitle(ch)
+			capitalizeNext = false
+		}
+	}
+	return string(buffer)
+
+}
+
+/*
+CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a
+titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood
+to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized.
+Capitalization uses the Unicode title case, normally equivalent to upper case.
+
+Parameters:
+	str - the string to capitalize fully
+	delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter
+
+Returns:
+	capitalized string
+*/
+func CapitalizeFully(str string, delimiters ...rune) string {
+
+	var delimLen int
+
+	if delimiters == nil {
+		delimLen = -1
+	} else {
+		delimLen = len(delimiters)
+	}
+
+	if str == "" || delimLen == 0 {
+		return str
+	}
+	str = strings.ToLower(str)
+	return Capitalize(str, delimiters...)
+}
+
+/*
+Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed.
+The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter
+character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char).
+
+Parameters:
+	str - the string to uncapitalize
+	delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter
+
+Returns:
+	uncapitalized string
+*/
+func Uncapitalize(str string, delimiters ...rune) string {
+
+	var delimLen int
+
+	if delimiters == nil {
+		delimLen = -1
+	} else {
+		delimLen = len(delimiters)
+	}
+
+	if str == "" || delimLen == 0 {
+		return str
+	}
+
+	buffer := []rune(str)
+	uncapitalizeNext := true // TODO Always makes capitalize/uncapitalize apply to the first char.
+	for i := 0; i < len(buffer); i++ {
+		ch := buffer[i]
+		if isDelimiter(ch, delimiters...) {
+			uncapitalizeNext = true
+		} else if uncapitalizeNext {
+			buffer[i] = unicode.ToLower(ch)
+			uncapitalizeNext = false
+		}
+	}
+	return string(buffer)
+}
+
+/*
+SwapCase swaps the case of a string using a word based algorithm.
+
+Conversion algorithm:
+
+	Upper case character converts to Lower case
+	Title case character converts to Lower case
+	Lower case character after Whitespace or at start converts to Title case
+	Other Lower case character converts to Upper case
+	Whitespace is defined by unicode.IsSpace(char).
+
+Parameters:
+	str - the string to swap case
+
+Returns:
+	the changed string
+*/
+func SwapCase(str string) string {
+	if str == "" {
+		return str
+	}
+	buffer := []rune(str)
+
+	whitespace := true
+
+	for i := 0; i < len(buffer); i++ {
+		ch := buffer[i]
+		if unicode.IsUpper(ch) {
+			buffer[i] = unicode.ToLower(ch)
+			whitespace = false
+		} else if unicode.IsTitle(ch) {
+			buffer[i] = unicode.ToLower(ch)
+			whitespace = false
+		} else if unicode.IsLower(ch) {
+			if whitespace {
+				buffer[i] = unicode.ToTitle(ch)
+				whitespace = false
+			} else {
+				buffer[i] = unicode.ToUpper(ch)
+			}
+		} else {
+			whitespace = unicode.IsSpace(ch)
+		}
+	}
+	return string(buffer)
+}
+
+/*
+Initials extracts the initial letters from each word in the string. The first letter of the string and all first
+letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters
+parameter is excluded, then whitespace is used. Whitespace is defined by unicode.IsSpace(char). An empty delimiter array returns an empty string.
+
+Parameters:
+	str - the string to get initials from
+	delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimiter
+Returns:
+	string of initial letters
+*/
+func Initials(str string, delimiters ...rune) string {
+	if str == "" {
+		return str
+	}
+	if delimiters != nil && len(delimiters) == 0 {
+		return ""
+	}
+	strLen := len(str)
+	var buf bytes.Buffer
+	lastWasGap := true
+	for i := 0; i < strLen; i++ {
+		ch := rune(str[i])
+
+		if isDelimiter(ch, delimiters...)
{ + lastWasGap = true + } else if lastWasGap { + buf.WriteRune(ch) + lastWasGap = false + } + } + return buf.String() +} + +// private function (lower case func name) +func isDelimiter(ch rune, delimiters ...rune) bool { + if delimiters == nil { + return unicode.IsSpace(ch) + } + for _, delimiter := range delimiters { + if ch == delimiter { + return true + } + } + return false +} diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore new file mode 100644 index 00000000..6b061e61 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.gitignore @@ -0,0 +1 @@ +_fuzz/ \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml new file mode 100644 index 00000000..c87d1c4b --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -0,0 +1,30 @@ +run: + deadline: 2m + +linters: + disable-all: true + enable: + - misspell + - structcheck + - govet + - staticcheck + - deadcode + - errcheck + - varcheck + - unparam + - ineffassign + - nakedret + - gocyclo + - dupl + - goimports + - revive + - gosec + - gosimple + - typecheck + - unused + +linters-settings: + gofmt: + simplify: true + dupl: + threshold: 600 diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md new file mode 100644 index 00000000..f1262642 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -0,0 +1,214 @@ +# Changelog + +## 3.2.0 (2022-11-28) + +### Added + +- #190: Added text marshaling and unmarshaling +- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg) +- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker) +- #179: Added New() version constructor (thanks @kazhuravlev) + +### Changed + +- #182/#183: Updated CI testing setup + +### Fixed + +- #186: Fixing issue where validation of constraint section gave false positives +- #176: Fix constraints check with *-0 (thanks @mtt0) +- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni) +- #161: Fixed godoc (thanks @afirth) + +## 3.1.1 (2020-11-23) + +### Fixed + +- #158: Fixed issue with generated regex operation order that could cause problem + +## 3.1.0 (2020-04-15) + +### Added + +- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) + +### Changed + +- #148: More accurate validation messages on constraints + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. The Go API was not changed because many people are using +`go get` without Go modules for their applications and API breaking changes cause +errors which we have or would need to support. + +The changes in this release are the handling based on the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. 
For example,
+  1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly
+  speaking semantic versions. This function is faster, performs fewer operations,
+  and uses fewer allocations than NewVersion.
+- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint.
+  The Makefile contains the operations used. For more information you can start
+  on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing
+- Now using Go modules
+
+### Changed
+
+- NewVersion has proper prerelease and metadata validation with error messages
+  to signal an issue with either of them
+- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the
+  version is >=1 the ^ ranges work the same as v1. For major versions of 0 the
+  rules have changed. The minor version is treated as the stable version unless
+  a patch is specified and then it is equivalent to =. One difference from npm/js
+  is that prereleases there are only to a specific version (e.g. 1.2.3).
+  Prereleases here look over multiple versions and follow semantic version
+  ordering rules. This pattern now follows along with the expected and requested
+  handling of this package by numerous users.
+
+## 1.5.0 (2019-09-11)
+
+### Added
+
+- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
+
+### Changed
+
+- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
+- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
+- #72: Adding docs comment pointing to vert for a cli
+- #71: Update the docs on pre-release comparator handling
+- #89: Test with new go versions (thanks @thedevsaddam)
+- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
+
+### Fixed
+
+- #78: Fix unchecked error in example code (thanks @ravron)
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+- #97: Fixed copyright file for proper display on GitHub
+- #107: Fix handling prerelease when sorting alphanum and num
+- #109: Fixed where Validate sometimes returns wrong message on error
+
+## 1.4.2 (2018-04-10)
+
+### Changed
+
+- #72: Updated the docs to point to vert for a console application
+- #71: Update the docs on pre-release comparator handling
+
+### Fixed
+
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+
+## 1.4.1 (2018-04-02)
+
+### Fixed
+
+- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
+
+## 1.4.0 (2017-10-04)
+
+### Changed
+
+- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
+
+## 1.3.1 (2017-07-10)
+
+### Fixed
+
+- Fixed #57: number comparisons in prerelease sometimes inaccurate
+
+## 1.3.0 (2017-05-02)
+
+### Added
+
+- #45: Added json (un)marshaling support (thanks @mh-cbon)
+- Stability marker. See https://masterminds.github.io/stability/
+
+### Fixed
+
+- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
+
+### Changed
+
+- #55: The godoc icon moved from png to svg
+
+## 1.2.3 (2017-04-03)
+
+### Fixed
+
+- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
+
+## Release 1.2.2 (2016-12-13)
+
+### Fixed
+
+- #34: Fixed issue where hyphen range was not working with pre-release parsing.
+
+## Release 1.2.1 (2016-11-28)
+
+### Fixed
+
+- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
+  properly.
+ +## Release 1.2.0 (2016-11-04) + +### Added + +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +### Fixed + +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +## Release 1.1.1 (2016-06-30) + +### Changed + +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +## Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +## Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt new file mode 100644 index 00000000..9ff7da9c --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 00000000..eac19178 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,37 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint +GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build +GOFUZZ = $(GOPATH)/bin/go-fuzz + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: $(GOFUZZBUILD) $(GOFUZZ) + @echo "==> Fuzz testing" + $(GOFUZZBUILD) + $(GOFUZZ) -workdir=_fuzz + +$(GOLANGCI_LINT): + # Install golangci-lint. 
The configuration for it is in the .golangci.yml
	# file in the root of the repository
	echo ${GOPATH}
	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
+
+$(GOFUZZBUILD):
+	cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build
+
+$(GOFUZZ):
+	cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep
\ No newline at end of file
diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md
new file mode 100644
index 00000000..d8f54dcb
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/README.md
@@ -0,0 +1,244 @@
+# SemVer
+
+The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[![Stability:
+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
+[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions)
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+If you are looking for a command line tool for version comparisons please see
+[vert](https://github.com/Masterminds/vert) which uses this library.
+
+## Package Versions
+
+There are three major versions of the `semver` package.
+
+* 3.x.x is the new stable and active version. This version is focused on constraint
+  compatibility for range handling in other tools from other languages. It has
+  a similar API to the v1 releases. The development of this version is on the master
+  branch. The documentation for this version is below.
+* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
+  no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
+  There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
+* 1.x.x is the most widely used version with numerous tagged releases. This is the
+  previous stable and is still maintained for bug fixes. The development, to fix
+  bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+
+## Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version.
For example,
+
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. Getting the original string is useful if the semantic version was coerced
+into a valid form.
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+```go
+raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+vs := make([]*semver.Version, len(raw))
+for i, r := range raw {
+    v, err := semver.NewVersion(r)
+    if err != nil {
+        t.Errorf("Error parsing version: %s", err)
+    }
+
+    vs[i] = v
+}
+
+sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`. There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+   and others it will follow the specification and always include prereleases
+   within the comparison. It will provide an answer that is valid with the
+   comparison section of the spec at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+   different set of rules that are common for ranges with tools like npm/js
+   and Rust/Cargo. This includes considering prereleases to be invalid if the
+   range does not include one. If you want to have it include pre-releases a
+   simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+   ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+    // Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parsable.
+}
+// Check if the version meets the constraints. The a variable will be true.
+a := c.Check(v)
+```
+
+### Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of space or comma separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
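+
+A minimal sketch of checking several versions against that compound constraint
+(this example is illustrative and not part of the upstream README):
+
+```go
+c, err := semver.NewConstraint(">= 1.2 < 3.0.0 || >= 4.2.3")
+if err != nil {
+    // Handle constraint not being parsable.
+}
+
+for _, s := range []string{"1.4.5", "3.1.0", "4.2.3"} {
+    v, err := semver.NewVersion(s)
+    if err != nil {
+        // Handle version not being parsable.
+        continue
+    }
+    // Check prints true for 1.4.5 and 4.2.3, and false for 3.1.0.
+    fmt.Printf("%s satisfies the constraint: %t\n", s, c.Check(v))
+}
+```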
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
+
+### Working With Prerelease Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of prereleases include
+development, alpha, beta, and release candidate releases. A prerelease may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, prereleases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification prereleases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer comparisons using constraints without a prerelease comparator will skip
+prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order
+(see an [ASCII Table](http://www.asciitable.com/)).
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
+### Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+### Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+### Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+### Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking.
For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+* `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+## Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+if err != nil {
+    // Handle constraint not being parseable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parseable.
+}
+
+// Validate a version against a constraint.
+a, msgs := c.Validate(v)
+// a is false
+for _, m := range msgs {
+    fmt.Println(m)
+
+    // Loops over the errors which would read
+    // "1.3 is greater than 1.2.3"
+    // "1.3 is less than 1.4"
+}
+```
+
+## Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go
new file mode 100644
index 00000000..a7823589
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/collection.go
@@ -0,0 +1,24 @@
+package semver
+
+// Collection is a collection of Version instances and implements the sort
+// interface. See the sort package for more details.
+// https://golang.org/pkg/sort/
+type Collection []*Version
+
+// Len returns the length of a collection. The number of Version instances
+// on the slice.
+func (c Collection) Len() int {
+	return len(c)
+}
+
+// Less is needed for the sort interface to compare two Version objects on the
+// slice. It checks if one is less than the other.
+func (c Collection) Less(i, j int) bool {
+	return c[i].LessThan(c[j])
+}
+
+// Swap is needed for the sort interface to replace the Version objects
+// at two different positions in the slice.
+func (c Collection) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go
new file mode 100644
index 00000000..203072e4
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/constraints.go
@@ -0,0 +1,594 @@
+package semver
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// Constraints is one or more constraints that a semantic version can be
+// checked against.
+type Constraints struct {
+	constraints [][]*constraint
+}
+
+// NewConstraint returns a Constraints instance that a Version instance can
+// be checked against. If there is a parse error it will be returned.
+func NewConstraint(c string) (*Constraints, error) {
+
+	// Rewrite hyphen (-) ranges into comparison operations.
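+	// For example (an illustration based on the README, not an upstream
+	// comment), the hyphen range "1.2 - 1.4.5" is rewritten to
+	// ">= 1.2 <= 1.4.5" before the individual comparisons are parsed.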
+ c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + + // TODO: Find a way to validate and fetch all the constraints in a simpler form + + // Validate the segment + if !validConstraintRegex.MatchString(v) { + return nil, fmt.Errorf("improper constraint: %s", v) + } + + cs := findConstraintRegex.FindAllString(v, -1) + if cs == nil { + cs = append(cs, v) + } + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // TODO(mattfarina): For v4 of this library consolidate the Check and Validate + // functions as the underlying functions make that possible now. + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if check, _ := c.check(v); !check { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool + for _, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. + if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if _, err := c.check(v); err != nil { + e = append(e, err) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +func (cs Constraints) String() string { + buf := make([]string, len(cs.constraints)) + var tmp bytes.Buffer + + for k, v := range cs.constraints { + tmp.Reset() + vlen := len(v) + for kk, c := range v { + tmp.WriteString(c.string()) + + // Space separate the AND conditions + if vlen > 1 && kk < vlen-1 { + tmp.WriteString(" ") + } + } + buf[k] = tmp.String() + } + + return strings.Join(buf, " || ") +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (cs *Constraints) UnmarshalText(text []byte) error { + temp, err := NewConstraint(string(text)) + if err != nil { + return err + } + + *cs = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. 
+func (cs Constraints) MarshalText() ([]byte, error) {
+	return []byte(cs.String()), nil
+}
+
+var constraintOps map[string]cfunc
+var constraintRegex *regexp.Regexp
+var constraintRangeRegex *regexp.Regexp
+
+// Used to find individual constraints within a multi-constraint string
+var findConstraintRegex *regexp.Regexp
+
+// Used to validate that a segment of ANDs is valid
+var validConstraintRegex *regexp.Regexp
+
+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
+	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+func init() {
+	constraintOps = map[string]cfunc{
+		"":   constraintTildeOrEqual,
+		"=":  constraintTildeOrEqual,
+		"!=": constraintNotEqual,
+		">":  constraintGreaterThan,
+		"<":  constraintLessThan,
+		">=": constraintGreaterThanEqual,
+		"=>": constraintGreaterThanEqual,
+		"<=": constraintLessThanEqual,
+		"=<": constraintLessThanEqual,
+		"~":  constraintTilde,
+		"~>": constraintTilde,
+		"^":  constraintCaret,
+	}
+
+	ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^`
+
+	constraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`^\s*(%s)\s*(%s)\s*$`,
+		ops,
+		cvRegex))
+
+	constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
+		`\s*(%s)\s+-\s+(%s)\s*`,
+		cvRegex, cvRegex))
+
+	findConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`(%s)\s*(%s)`,
+		ops,
+		cvRegex))
+
+	// The first time a constraint shows up will look slightly different from
+	// future times it shows up due to a leading space or comma in a given
+	// string.
+	validConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`,
+		ops,
+		cvRegex,
+		ops,
+		cvRegex))
+}
+
+// An individual constraint
+type constraint struct {
+	// The version used in the constraint check. For example, if a constraint
+	// is '<= 2.0.0', con is a version instance representing 2.0.0.
+	con *Version
+
+	// The original parsed version (e.g., 4.x from != 4.x)
+	orig string
+
+	// The original operator for the constraint
+	origfunc string
+
+	// When an x is used as part of the version (e.g., 1.x)
+	minorDirty bool
+	dirty      bool
+	patchDirty bool
+}
+
+// Check if a version meets the constraint
+func (c *constraint) check(v *Version) (bool, error) {
+	return constraintOps[c.origfunc](v, c)
+}
+
+// String prints an individual constraint into a string
+func (c *constraint) string() string {
+	return c.origfunc + c.orig
+}
+
+type cfunc func(v *Version, c *constraint) (bool, error)
+
+func parseConstraint(c string) (*constraint, error) {
+	if len(c) > 0 {
+		m := constraintRegex.FindStringSubmatch(c)
+		if m == nil {
+			return nil, fmt.Errorf("improper constraint: %s", c)
+		}
+
+		cs := &constraint{
+			orig:     m[2],
+			origfunc: m[1],
+		}
+
+		ver := m[2]
+		minorDirty := false
+		patchDirty := false
+		dirty := false
+		if isX(m[3]) || m[3] == "" {
+			ver = fmt.Sprintf("0.0.0%s", m[6])
+			dirty = true
+		} else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" {
+			minorDirty = true
+			dirty = true
+			ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
+		} else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" {
+			dirty = true
+			patchDirty = true
+			ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
+		}
+
+		con, err := NewVersion(ver)
+		if err != nil {
+
+			// The constraintRegex should catch any regex parsing errors. So,
+			// we should never get here.
+			return nil, errors.New("constraint Parser Error")
+		}
+
+		cs.con = con
+		cs.minorDirty = minorDirty
+		cs.patchDirty = patchDirty
+		cs.dirty = dirty
+
+		return cs, nil
+	}
+
+	// The rest is the special case where an empty string was passed in which
+	// is equivalent to * or >=0.0.0
+	con, err := StrictNewVersion("0.0.0")
+	if err != nil {
+
+		// The constraintRegex should catch any regex parsing errors. So,
+		// we should never get here.
+		return nil, errors.New("constraint Parser Error")
+	}
+
+	cs := &constraint{
+		con:        con,
+		orig:       c,
+		origfunc:   "",
+		minorDirty: false,
+		patchDirty: false,
+		dirty:      true,
+	}
+	return cs, nil
+}
+
+// Constraint functions
+func constraintNotEqual(v *Version, c *constraint) (bool, error) {
+	if c.dirty {
+
+		// If there is a pre-release on the version but the constraint isn't looking
+		// for them assume that pre-releases are not compatible. See issue 21 for
+		// more details.
+		if v.Prerelease() != "" && c.con.Prerelease() == "" {
+			return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+		}
+
+		if c.con.Major() != v.Major() {
+			return true, nil
+		}
+		if c.con.Minor() != v.Minor() && !c.minorDirty {
+			return true, nil
+		} else if c.minorDirty {
+			return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+		} else if c.con.Patch() != v.Patch() && !c.patchDirty {
+			return true, nil
+		} else if c.patchDirty {
+			// Need to handle prereleases if present
+			if v.Prerelease() != "" || c.con.Prerelease() != "" {
+				eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0
+				if eq {
+					return true, nil
+				}
+				return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+			}
+			return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+		}
+	}
+
+	eq := v.Equal(c.con)
+	if eq {
+		return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+	}
+
+	return true, nil
+}
+
+func constraintGreaterThan(v *Version, c *constraint) (bool, error) {
+
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	var eq bool
+
+	if !c.dirty {
+		eq = v.Compare(c.con) == 1
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	}
+
+	if v.Major() > c.con.Major() {
+		return true, nil
+	} else if v.Major() < c.con.Major() {
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	} else if c.minorDirty {
+		// This is a range case such as >11. When the version is something like
+		// 11.1.0 it is not > 11. For that we would need 12 or higher
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	} else if c.patchDirty {
+		// This is for ranges such as >11.1. A version of 11.1.1 is not greater,
+		// while one of 11.2.1 is greater
+		eq = v.Minor() > c.con.Minor()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	}
+
+	// If we have gotten here we are not comparing prereleases and can use the
+	// Compare function to accomplish that.
+	eq = v.Compare(c.con) == 1
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+}
+
+func constraintLessThan(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	eq := v.Compare(c.con) < 0
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig)
+}
+
+func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) {
+
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	eq := v.Compare(c.con) >= 0
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is less than %s", v, c.orig)
+}
+
+func constraintLessThanEqual(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	var eq bool
+
+	if !c.dirty {
+		eq = v.Compare(c.con) <= 0
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+	}
+
+	if v.Major() > c.con.Major() {
+		return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+	} else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty {
+		return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+	}
+
+	return true, nil
+}
+
+// ~*, ~>* --> >= 0.0.0 (any)
+// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x, ~>2.x.x --> >=2.0.0, <3.0.0
+// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
+// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
+// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
+// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
+func constraintTilde(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	if v.LessThan(c.con) {
+		return false, fmt.Errorf("%s is less than %s", v, c.orig)
+	}
+
+	// ~0.0.0 is a special case where all versions are accepted. It's
+	// equivalent to >= 0.0.0.
+	if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 &&
+		!c.minorDirty && !c.patchDirty {
+		return true, nil
+	}
+
+	if v.Major() != c.con.Major() {
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+
+	if v.Minor() != c.con.Minor() && !c.minorDirty {
+		return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig)
+	}
+
+	return true, nil
+}
+
+// When there is a .x (dirty) status it automatically opts in to ~.
Otherwise
+// it's a straight =
+func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	if c.dirty {
+		return constraintTilde(v, c)
+	}
+
+	eq := v.Equal(c.con)
+	if eq {
+		return true, nil
+	}
+
+	return false, fmt.Errorf("%s is not equal to %s", v, c.orig)
+}
+
+// ^* --> (any)
+// ^1.2.3 --> >=1.2.3 <2.0.0
+// ^1.2 --> >=1.2.0 <2.0.0
+// ^1 --> >=1.0.0 <2.0.0
+// ^0.2.3 --> >=0.2.3 <0.3.0
+// ^0.2 --> >=0.2.0 <0.3.0
+// ^0.0.3 --> >=0.0.3 <0.0.4
+// ^0.0 --> >=0.0.0 <0.1.0
+// ^0 --> >=0.0.0 <1.0.0
+func constraintCaret(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	// This less than handles prereleases
+	if v.LessThan(c.con) {
+		return false, fmt.Errorf("%s is less than %s", v, c.orig)
+	}
+
+	var eq bool
+
+	// ^ when the major > 0 is >=x.y.z < x+1
+	if c.con.Major() > 0 || c.minorDirty {
+
+		// ^ has to be within a major range for > 0. Everything less than was
+		// filtered out with the LessThan call above. This filters out those
+		// that are greater but not within the same major range.
+		eq = v.Major() == c.con.Major()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+
+	// ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1
+	if c.con.Major() == 0 && v.Major() > 0 {
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+	// If the con Minor is > 0 it is not dirty
+	if c.con.Minor() > 0 || c.patchDirty {
+		eq = v.Minor() == c.con.Minor()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig)
+	}
+	// ^ when the major is 0 and the minor is 0 is =0.0.z
+	if c.con.Minor() == 0 && v.Minor() > 0 {
+		return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig)
+	}
+
+	// At this point the major is 0 and the minor is 0 and not dirty. The patch
+	// is not dirty so we need to check if they are equal. If they are not
+	// equal, the constraint fails.
+	eq = c.con.Patch() == v.Patch()
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s does not equal %s.
Expect version and constraint to equal when major and minor versions are 0", v, c.orig)
+}
+
+func isX(x string) bool {
+	switch x {
+	case "x", "*", "X":
+		return true
+	default:
+		return false
+	}
+}
+
+func rewriteRange(i string) string {
+	m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+	if m == nil {
+		return i
+	}
+	o := i
+	for _, v := range m {
+		t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
+		o = strings.Replace(o, v[0], t, 1)
+	}
+
+	return o
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go
new file mode 100644
index 00000000..74f97caa
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/doc.go
@@ -0,0 +1,184 @@
+/*
+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
+
+Specifically it provides the ability to:
+
+  - Parse semantic versions
+  - Sort semantic versions
+  - Check if a semantic version fits within a set of constraints
+  - Optionally work with a `v` prefix
+
+# Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version, an error is returned if there is an issue parsing the
+version. For example,
+
+	v, err := semver.NewVersion("1.2.3-beta.1+b345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+# Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+	raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+	vs := make([]*semver.Version, len(raw))
+	for i, r := range raw {
+		v, err := semver.NewVersion(r)
+		if err != nil {
+			panic(err)
+		}
+
+		vs[i] = v
+	}
+
+	sort.Sort(semver.Collection(vs))
+
+# Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other is using Constraints. There are some important
+differences to note between these two methods of comparison.
+
+ 1. When two versions are compared using functions such as `Compare`, `LessThan`,
+    and others it will follow the specification and always include prereleases
+    within the comparison. It will provide an answer that is valid with the
+    comparison section of the spec at https://semver.org/#spec-item-11
+ 2. When constraint checking is used for checks or validation it will follow a
+    different set of rules that are common for ranges with tools like npm/js
+    and Rust/Cargo. This includes considering prereleases to be invalid if the
+    range does not include one. If you want to have it include pre-releases a
+    simple solution is to include `-0` in your range (see the sketch after
+    this list).
+ 3. Constraint ranges can have some complex rules including the shorthand use of
+    ~ and ^. For more details on those see the options below.
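+
+A minimal sketch of the prerelease difference described in point 2; the
+versions and ranges here are illustrative only:
+
+	pre := semver.MustParse("1.2.3-beta.1")
+	rel := semver.MustParse("1.2.3")
+
+	// Comparison methods follow the spec; the prerelease sorts below the release.
+	_ = pre.Compare(rel) // -1
+
+	// Constraint checks follow npm/Cargo-style rules; a range with no
+	// prerelease does not match prerelease versions at all.
+	c1, _ := semver.NewConstraint(">= 1.0.0")
+	_ = c1.Check(pre) // false
+	_ = c1.Check(rel) // true
+
+	// Including -0 in the range opts prereleases back in.
+	c2, _ := semver.NewConstraint(">= 1.0.0-0")
+	_ = c2.Check(pre) // true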
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns, while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+	c, err := semver.NewConstraint(">= 1.2.3")
+	if err != nil {
+		// Handle constraint not being parsable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parsable.
+	}
+	// Check if the version meets the constraints. The variable a will be true.
+	a := c.Check(v)
+
+# Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`
+
+The basic comparisons are:
+
+  - `=`: equal (aliased to no operator)
+  - `!=`: not equal
+  - `>`: greater than
+  - `<`: less than
+  - `>=`: greater than or equal to
+  - `<=`: less than or equal to
+
+# Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+  - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+  - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as wildcard characters. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+  - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `>= 1.2.x` is equivalent to `>= 1.2.0`
+  - `<= 2.x` is equivalent to `< 3`
+  - `*` is equivalent to `>= 0.0.0`
+
+# Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+  - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
+  - `~1` is equivalent to `>= 1, < 2`
+  - `~2.3` is equivalent to `>= 2.3 < 2.4`
+  - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `~1.x` is equivalent to `>= 1 < 2`
+
+# Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking.
For example,
+
+  - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+  - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+  - `^2.3` is equivalent to `>= 2.3, < 3`
+  - `^2.x` is equivalent to `>= 2.0.0, < 3`
+  - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+  - `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+  - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+  - `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+  - `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+	c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+	if err != nil {
+		// Handle constraint not being parsable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parsable.
+	}
+
+	// Validate a version against a constraint.
+	a, msgs := c.Validate(v)
+	// a is false
+	for _, m := range msgs {
+		fmt.Println(m)
+
+		// Loops over the errors which would read
+		// "1.3 is greater than 1.2.3"
+		// "1.3 is less than 1.4"
+	}
+*/
+package semver
diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go
new file mode 100644
index 00000000..a242ad70
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/fuzz.go
@@ -0,0 +1,22 @@
+// +build gofuzz
+
+package semver
+
+func Fuzz(data []byte) int {
+	d := string(data)
+
+	// Test NewVersion
+	_, _ = NewVersion(d)
+
+	// Test StrictNewVersion
+	_, _ = StrictNewVersion(d)
+
+	// Test NewConstraint
+	_, _ = NewConstraint(d)
+
+	// The return value should be 0 normally, 1 if the priority in future tests
+	// should be increased, and -1 if future tests should skip passing in that
+	// data. We do not have a reason to change priority so 0 is always returned.
+	// There are example tests that do this.
+	return 0
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
new file mode 100644
index 00000000..7c4bed33
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -0,0 +1,639 @@
+package semver
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+
+var (
+	// ErrInvalidSemVer is returned when a version is found to be invalid
+	// while being parsed.
+	ErrInvalidSemVer = errors.New("Invalid Semantic Version")
+
+	// ErrEmptyString is returned when an empty string is passed in for parsing.
+	ErrEmptyString = errors.New("Version string empty")
+
+	// ErrInvalidCharacters is returned when invalid characters are found as
+	// part of a version
+	ErrInvalidCharacters = errors.New("Invalid characters in version")
+
+	// ErrSegmentStartsZero is returned when a version segment starts with 0.
+	// This is invalid in SemVer.
+	ErrSegmentStartsZero = errors.New("Version segment starts with 0")
+
+	// ErrInvalidMetadata is returned when the metadata is an invalid format
+	ErrInvalidMetadata = errors.New("Invalid Metadata string")
+
+	// ErrInvalidPrerelease is returned when the pre-release is an invalid format
+	ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
+)
+
+// semVerRegex is the regular expression used to parse a semantic version.
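+// It is intentionally looser than the spec: the leading "v" and the minor and
+// patch segments are optional, which lets NewVersion coerce values like "v1.2".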
+const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch uint64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + semVerRegex + "$") +} + +const ( + num string = "0123456789" + allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num +) + +// StrictNewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. Only parses valid semantic versions. +// Performs checking that can find errors within the version. +// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x +// releases of semver did, use the NewVersion() function. +func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. + + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // check for prerelease or build metadata + var extra []string + if strings.ContainsAny(parts[2], "-+") { + // Start with the build metadata first as it needs to be on the right + extra = strings.SplitN(parts[2], "+", 2) + if len(extra) > 1 { + // build metadata found + sv.metadata = extra[1] + parts[2] = extra[0] + } + + extra = strings.SplitN(parts[2], "-", 2) + if len(extra) > 1 { + // prerelease found + sv.pre = extra[1] + parts[2] = extra[0] + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. + for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract the major, minor, and patch elements onto the returned Version + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + // No prerelease or build metadata found so returning now as a fastpath. + if sv.pre == "" && sv.metadata == "" { + return sv, nil + } + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). 
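+// For example, NewVersion("v1.2") yields a Version whose String() is "1.2.0",
+// while Original() still returns "v1.2".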
+func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// New creates a new instance of Version with each of the parts passed in as +// arguments instead of parsing a version string. +func New(major, minor, patch uint64, pre, metadata string) *Version { + v := Version{ + major: major, + minor: minor, + patch: patch, + pre: pre, + metadata: metadata, + original: "", + } + + v.original = v.String() + + return &v +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. 
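+// For example, 1.2.3 becomes 1.2.4.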
+// If the current version has any of prerelease or metadata information,
+// it unsets both values and keeps the current patch value (e.g., 1.2.3-beta
+// becomes 1.2.3).
+func (v Version) IncPatch() Version {
+	vNext := v
+	// according to http://semver.org/#spec-item-9
+	// Pre-release versions have a lower precedence than the associated normal version.
+	// according to http://semver.org/#spec-item-10
+	// Build metadata SHOULD be ignored when determining version precedence.
+	if v.pre != "" {
+		vNext.metadata = ""
+		vNext.pre = ""
+	} else {
+		vNext.metadata = ""
+		vNext.pre = ""
+		vNext.patch = v.patch + 1
+	}
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// IncMinor produces the next minor version.
+// Sets patch to 0.
+// Increments minor number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMinor() Version {
+	vNext := v
+	vNext.metadata = ""
+	vNext.pre = ""
+	vNext.patch = 0
+	vNext.minor = v.minor + 1
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// IncMajor produces the next major version.
+// Sets patch to 0.
+// Sets minor to 0.
+// Increments major number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMajor() Version {
+	vNext := v
+	vNext.metadata = ""
+	vNext.pre = ""
+	vNext.patch = 0
+	vNext.minor = 0
+	vNext.major = v.major + 1
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// SetPrerelease defines the prerelease value.
+// Value must not include the required 'hyphen' prefix.
+func (v Version) SetPrerelease(prerelease string) (Version, error) {
+	vNext := v
+	if len(prerelease) > 0 {
+		if err := validatePrerelease(prerelease); err != nil {
+			return vNext, err
+		}
+	}
+	vNext.pre = prerelease
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// SetMetadata defines metadata value.
+// Value must not include the required 'plus' prefix.
+func (v Version) SetMetadata(metadata string) (Version, error) {
+	vNext := v
+	if len(metadata) > 0 {
+		if err := validateMetadata(metadata); err != nil {
+			return vNext, err
+		}
+	}
+	vNext.metadata = metadata
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// LessThan tests if one version is less than another one.
+func (v *Version) LessThan(o *Version) bool {
+	return v.Compare(o) < 0
+}
+
+// GreaterThan tests if one version is greater than another one.
+func (v *Version) GreaterThan(o *Version) bool {
+	return v.Compare(o) > 0
+}
+
+// Equal tests if two versions are equal to each other.
+// Note, versions can be equal with different metadata since metadata
+// is not considered part of the comparable version.
+func (v *Version) Equal(o *Version) bool {
+	return v.Compare(o) == 0
+}
+
+// Compare compares this version to another one. It returns -1, 0, or 1 if
+// the version is smaller than, equal to, or larger than the other version.
+//
+// Versions are compared by X.Y.Z. Build metadata is ignored. A prerelease
+// version is lower than the same version without a prerelease. Compare always
+// takes prereleases into account. If you want to work with ranges using
+// typical range syntaxes that skip prereleases when the range is not looking
+// for them, use Constraints.
+func (v *Version) Compare(o *Version) int {
+	// Compare the major, minor, and patch version for differences. If a
+	// difference is found, return the comparison.
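+	// Build metadata is ignored entirely here, per spec item 10.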
+	if d := compareSegment(v.Major(), o.Major()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
+		return d
+	}
+
+	// At this point the major, minor, and patch versions are the same.
+	ps := v.pre
+	po := o.Prerelease()
+
+	if ps == "" && po == "" {
+		return 0
+	}
+	if ps == "" {
+		return 1
+	}
+	if po == "" {
+		return -1
+	}
+
+	return comparePrerelease(ps, po)
+}
+
+// UnmarshalJSON implements JSON.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// MarshalJSON implements JSON.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (v *Version) UnmarshalText(text []byte) error {
+	temp, err := NewVersion(string(text))
+	if err != nil {
+		return err
+	}
+
+	*v = *temp
+
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (v Version) MarshalText() ([]byte, error) {
+	return []byte(v.String()), nil
+}
+
+// Scan implements the SQL.Scanner interface.
+func (v *Version) Scan(value interface{}) error {
+	var s string
+	s, _ = value.(string)
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// Value implements the Driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+	return v.String(), nil
+}
+
+func compareSegment(v, o uint64) int {
+	if v < o {
+		return -1
+	}
+	if v > o {
+		return 1
+	}
+
+	return 0
+}
+
+func comparePrerelease(v, o string) int {
+	// split the prerelease versions by their part. The separator, per the spec,
+	// is a .
+	sparts := strings.Split(v, ".")
+	oparts := strings.Split(o, ".")
+
+	// Find the longer length of the parts to know how many loop iterations to
+	// go through.
+	slen := len(sparts)
+	olen := len(oparts)
+
+	l := slen
+	if olen > slen {
+		l = olen
+	}
+
+	// Iterate over each part of the prereleases to compare the differences.
+	for i := 0; i < l; i++ {
+		// Since the length of the parts can be different we need to create
+		// a placeholder. This is to avoid out of bounds issues.
+		stemp := ""
+		if i < slen {
+			stemp = sparts[i]
+		}
+
+		otemp := ""
+		if i < olen {
+			otemp = oparts[i]
+		}
+
+		d := comparePrePart(stemp, otemp)
+		if d != 0 {
+			return d
+		}
+	}
+
+	// Reaching here means two versions are of equal value but have different
+	// metadata (the part following a +). They are not identical in string form
+	// but the version comparison finds them to be equal.
+	return 0
+}
+
+func comparePrePart(s, o string) int {
+	// Fastpath if they are equal
+	if s == o {
+		return 0
+	}
+
+	// When s or o are empty we can use the other in an attempt to determine
+	// the response.
+	if s == "" {
+		if o != "" {
+			return -1
+		}
+		return 1
+	}
+
+	if o == "" {
+		if s != "" {
+			return 1
+		}
+		return -1
+	}
+
+	// When comparing strings "99" is greater than "103". To handle
+	// cases like this we need to detect numbers and compare them.
According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. + + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 +} + +// Like strings.ContainsAny but does an only instead of any. +func containsOnly(s string, comp string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(comp, r) + }) == -1 +} + +// From the spec, "Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. +// Numeric identifiers MUST NOT include leading zeroes.". These segments can +// be dot separated. +func validatePrerelease(p string) error { + eparts := strings.Split(p, ".") + for _, p := range eparts { + if containsOnly(p, num) { + if len(p) > 1 && p[0] == '0' { + return ErrSegmentStartsZero + } + } else if !containsOnly(p, allowed) { + return ErrInvalidPrerelease + } + } + + return nil +} + +// From the spec, "Build metadata MAY be denoted by +// appending a plus sign and a series of dot separated identifiers immediately +// following the patch or pre-release version. Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." +func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore new file mode 100644 index 00000000..5e3002f8 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md new file mode 100644 index 00000000..2ce45dd4 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md @@ -0,0 +1,383 @@ +# Changelog + +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
+ +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
+ +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
+ +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt new file mode 100644 index 00000000..f311b1ea --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2013-2020 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile new file mode 100644 index 00000000..78d409cd --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/Makefile @@ -0,0 +1,9 @@ +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . 
diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md new file mode 100644 index 00000000..3e22c60e --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/README.md @@ -0,0 +1,100 @@ +# Sprig: Template functions for Go templates + +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig) +[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) +[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions) + +The Go language comes with a [built-in template +language](http://golang.org/pkg/text/template/), but not +very many template functions. Sprig is a library that provides more than 100 commonly +used template functions. + +It is inspired by the template functions found in +[Twig](http://twig.sensiolabs.org/documentation) and in various +JavaScript libraries, such as [underscore.js](http://underscorejs.org/). + +## IMPORTANT NOTES + +Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In +its v0.3.9 release, there was a behavior change that impacts merging template +functions in sprig. It is currently recommended to use v0.3.10 or later of that package. +Using v0.3.9 will cause sprig tests to fail. + +## Package Versions + +There are two active major versions of the `sprig` package. + +* v3 is currently stable release series on the `master` branch. The Go API should + remain compatible with v2, the current stable version. Behavior change behind + some functions is the reason for the new major version. +* v2 is the previous stable release series. It has been more than three years since + the initial release of v2. You can read the documentation and see the code + on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch. + Bug fixes to this major version will continue for some time. + +## Usage + +**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for +detailed instructions and code snippets for the >100 template functions available. + +**Go developers**: If you'd like to include Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig). + +For standard usage, read on. + +### Load the Sprig library + +To load the Sprig `FuncMap`: + +```go + +import ( + "github.com/Masterminds/sprig/v3" + "html/template" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. +tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) + + +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. 
The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go new file mode 100644 index 00000000..13a5cd55 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go @@ -0,0 +1,653 @@ +package sprig + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "hash/adler32" + "io" + "math/big" + "net" + "time" + + "strings" + + "github.com/google/uuid" + bcrypt_lib "golang.org/x/crypto/bcrypt" + "golang.org/x/crypto/scrypt" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} + +func bcrypt(input string) string { + hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost) + if err != nil { + return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err) + } + + return string(hash) +} + +func htpasswd(username string, password string) string { + if strings.Contains(username, ":") { + return fmt.Sprintf("invalid username: %s", username) + } + return fmt.Sprintf("%s:%s", username, bcrypt(password)) +} + +func randBytes(count int) (string, error) { + buf := make([]byte, count) + if _, err := rand.Read(buf); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(buf), nil +} + +// uuidv4 provides a safe and secure UUID v4 implementation +func uuidv4() string { + return uuid.New().String() +} + +var masterPasswordSeed = "com.lyndir.masterpassword" + +var passwordTypeTemplates = map[string][][]byte{ + "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, + "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), + []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), + []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), + []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), + []byte("CvccCvcvCvccno")}, + "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, + "short": {[]byte("Cvcn")}, + 
"basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, + "pin": {[]byte("nnnn")}, +} + +var templateCharacters = map[byte]string{ + 'V': "AEIOU", + 'C': "BCDFGHJKLMNPQRSTVWXYZ", + 'v': "aeiou", + 'c': "bcdfghjklmnpqrstvwxyz", + 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", + 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", + 'n': "0123456789", + 'o': "@&%?,=[]_:-+*$#!'^~;()/.", + 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", +} + +func derivePassword(counter uint32, passwordType, password, user, site string) string { + var templates = passwordTypeTemplates[passwordType] + if templates == nil { + return fmt.Sprintf("cannot find password template %s", passwordType) + } + + var buffer bytes.Buffer + buffer.WriteString(masterPasswordSeed) + binary.Write(&buffer, binary.BigEndian, uint32(len(user))) + buffer.WriteString(user) + + salt := buffer.Bytes() + key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) + if err != nil { + return fmt.Sprintf("failed to derive password: %s", err) + } + + buffer.Truncate(len(masterPasswordSeed)) + binary.Write(&buffer, binary.BigEndian, uint32(len(site))) + buffer.WriteString(site) + binary.Write(&buffer, binary.BigEndian, counter) + + var hmacv = hmac.New(sha256.New, key) + hmacv.Write(buffer.Bytes()) + var seed = hmacv.Sum(nil) + var temp = templates[int(seed[0])%len(templates)] + + buffer.Truncate(0) + for i, element := range temp { + passChars := templateCharacters[element] + passChar := passChars[int(seed[i+1])%len(passChars)] + buffer.WriteByte(passChar) + } + + return buffer.String() +} + +func generatePrivateKey(typ string) string { + var priv interface{} + var err error + switch typ { + case "", "rsa": + // good enough for government work + priv, err = rsa.GenerateKey(rand.Reader, 4096) + case "dsa": + key := new(dsa.PrivateKey) + // again, good enough for government work + if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { + return fmt.Sprintf("failed to generate dsa params: %s", err) + } + err = dsa.GenerateKey(key, rand.Reader) + priv = key + case "ecdsa": + // again, good enough for government work + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case "ed25519": + _, priv, err = ed25519.GenerateKey(rand.Reader) + default: + return "Unknown type " + typ + } + if err != nil { + return fmt.Sprintf("failed to generate private key: %s", err) + } + + return string(pem.EncodeToMemory(pemBlockForKey(priv))) +} + +// DSAKeyFormat stores the format for DSA keys. 
+// Used by pemBlockForKey +type DSAKeyFormat struct { + Version int + P, Q, G, Y, X *big.Int +} + +func pemBlockForKey(priv interface{}) *pem.Block { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + case *dsa.PrivateKey: + val := DSAKeyFormat{ + P: k.P, Q: k.Q, G: k.G, + Y: k.Y, X: k.X, + } + bytes, _ := asn1.Marshal(val) + return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} + case *ecdsa.PrivateKey: + b, _ := x509.MarshalECPrivateKey(k) + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + default: + // attempt PKCS#8 format for all other keys + b, err := x509.MarshalPKCS8PrivateKey(k) + if err != nil { + return nil + } + return &pem.Block{Type: "PRIVATE KEY", Bytes: b} + } +} + +func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) { + block, _ := pem.Decode([]byte(pemBlock)) + if block == nil { + return nil, errors.New("no PEM data in input") + } + + if block.Type == "PRIVATE KEY" { + priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err) + } + return priv, nil + } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") { + return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type) + } + + switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY" + case "RSA": + priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err) + } + return priv, nil + case "EC": + priv, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing EC private key from PEM: %s", err) + } + return priv, nil + case "DSA": + var k DSAKeyFormat + _, err := asn1.Unmarshal(block.Bytes, &k) + if err != nil { + return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err) + } + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, Q: k.Q, G: k.G, + }, + Y: k.Y, + }, + X: k.X, + } + return priv, nil + default: + return nil, fmt.Errorf("invalid private key type %s", block.Type) + } +} + +func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) { + switch k := priv.(type) { + case interface{ Public() crypto.PublicKey }: + return k.Public(), nil + case *dsa.PrivateKey: + return &k.PublicKey, nil + default: + return nil, fmt.Errorf("unable to get public key for type %T", priv) + } +} + +type certificate struct { + Cert string + Key string +} + +func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { + crt := certificate{} + + cert, err := base64.StdEncoding.DecodeString(b64cert) + if err != nil { + return crt, errors.New("unable to decode base64 certificate") + } + + key, err := base64.StdEncoding.DecodeString(b64key) + if err != nil { + return crt, errors.New("unable to decode base64 private key") + } + + decodedCert, _ := pem.Decode(cert) + if decodedCert == nil { + return crt, errors.New("unable to decode certificate") + } + _, err = x509.ParseCertificate(decodedCert.Bytes) + if err != nil { + return crt, fmt.Errorf( + "error parsing certificate: decodedCert.Bytes: %s", + err, + ) + } + + _, err = parsePrivateKeyPEM(string(key)) + if err != nil { + return crt, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + crt.Cert = string(cert) + crt.Key = string(key) + + return crt, nil +} + +func generateCertificateAuthority( + cn string, + daysValid int, +) (certificate, error) { + priv, err := 
rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithPEMKey( + cn string, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithKeyInternal( + cn string, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + ca := certificate{} + + template, err := getBaseCertTemplate(cn, nil, nil, daysValid) + if err != nil { + return ca, err + } + // Override KeyUsage and IsCA + template.KeyUsage = x509.KeyUsageKeyEncipherment | + x509.KeyUsageDigitalSignature | + x509.KeyUsageCertSign + template.IsCA = true + + ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) + + return ca, err +} + +func generateSelfSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) + + return cert, err +} + +func generateSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) + if decodedSignerCert == nil { + return cert, errors.New("unable to decode certificate") + } + signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) + if err != 
nil { + return cert, fmt.Errorf( + "error parsing certificate: decodedSignerCert.Bytes: %s", + err, + ) + } + signerKey, err := parsePrivateKeyPEM(ca.Key) + if err != nil { + return cert, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey( + template, + priv, + signerCert, + signerKey, + ) + + return cert, err +} + +func getCertAndKey( + template *x509.Certificate, + signeeKey crypto.PrivateKey, + parent *x509.Certificate, + signingKey crypto.PrivateKey, +) (string, string, error) { + signeePubKey, err := getPublicKey(signeeKey) + if err != nil { + return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err) + } + derBytes, err := x509.CreateCertificate( + rand.Reader, + template, + parent, + signeePubKey, + signingKey, + ) + if err != nil { + return "", "", fmt.Errorf("error creating certificate: %s", err) + } + + certBuffer := bytes.Buffer{} + if err := pem.Encode( + &certBuffer, + &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) + } + + keyBuffer := bytes.Buffer{} + if err := pem.Encode( + &keyBuffer, + pemBlockForKey(signeeKey), + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding key: %s", err) + } + + return certBuffer.String(), keyBuffer.String(), nil +} + +func getBaseCertTemplate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (*x509.Certificate, error) { + ipAddresses, err := getNetIPs(ips) + if err != nil { + return nil, err + } + dnsNames, err := getAlternateDNSStrs(alternateDNS) + if err != nil { + return nil, err + } + serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) + if err != nil { + return nil, err + } + return &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: cn, + }, + IPAddresses: ipAddresses, + DNSNames: dnsNames, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + BasicConstraintsValid: true, + }, nil +} + +func getNetIPs(ips []interface{}) ([]net.IP, error) { + if ips == nil { + return []net.IP{}, nil + } + var ipStr string + var ok bool + var netIP net.IP + netIPs := make([]net.IP, len(ips)) + for i, ip := range ips { + ipStr, ok = ip.(string) + if !ok { + return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) + } + netIP = net.ParseIP(ipStr) + if netIP == nil { + return nil, fmt.Errorf("error parsing ip: %s", ipStr) + } + netIPs[i] = netIP + } + return netIPs, nil +} + +func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { + if alternateDNS == nil { + return []string{}, nil + } + var dnsStr string + var ok bool + alternateDNSStrs := make([]string, len(alternateDNS)) + for i, dns := range alternateDNS { + dnsStr, ok = dns.(string) + if !ok { + return nil, fmt.Errorf( + "error processing alternate dns name: %v is not a string", + dns, + ) + } + alternateDNSStrs[i] = dnsStr + } + return alternateDNSStrs, nil +} + +func encryptAES(password string, plaintext string) (string, error) { + if plaintext == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, 
[]byte(password)) + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + content := []byte(plaintext) + blockSize := block.BlockSize() + padding := blockSize - len(content)%blockSize + padtext := bytes.Repeat([]byte{byte(padding)}, padding) + content = append(content, padtext...) + + ciphertext := make([]byte, aes.BlockSize+len(content)) + + iv := ciphertext[:aes.BlockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return "", err + } + + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(ciphertext[aes.BlockSize:], content) + + return base64.StdEncoding.EncodeToString(ciphertext), nil +} + +func decryptAES(password string, crypt64 string) (string, error) { + if crypt64 == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, []byte(password)) + + crypt, err := base64.StdEncoding.DecodeString(crypt64) + if err != nil { + return "", err + } + + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + iv := crypt[:aes.BlockSize] + crypt = crypt[aes.BlockSize:] + decrypted := make([]byte, len(crypt)) + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(decrypted, crypt) + + return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil +} diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go new file mode 100644 index 00000000..ed022dda --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/date.go @@ -0,0 +1,152 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. +func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := 
duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go new file mode 100644 index 00000000..b9f97966 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. 
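+//
+// Illustrative template usage (a sketch, not upstream documentation):
+//
+//	{{ if any "" 0 "x" }}at least one value is non-empty{{ end }}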
+func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. +func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. +func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. 
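+//
+// Illustrative template usage (a sketch, not upstream documentation):
+//
+//	{{ ternary "on" "off" true }}   renders: on
+//	{{ true | ternary "on" "off" }} renders: on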
+func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go new file mode 100644 index 00000000..ade88969 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/dict.go @@ -0,0 +1,174 @@ +package sprig + +import ( + "github.com/imdario/mergo" + "github.com/mitchellh/copystructure" +) + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + // Swallow errors inside of a template. + return "" + } + } + return dst +} + +func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + // Swallow errors inside of a template. 
+ return "" + } + } + return dst +} + +func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func deepCopy(i interface{}) interface{} { + c, err := mustDeepCopy(i) + if err != nil { + panic("deepCopy error: " + err.Error()) + } + + return c +} + +func mustDeepCopy(i interface{}) (interface{}, error) { + return copystructure.Copy(i) +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go new file mode 100644 index 00000000..aabb9d44 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. + +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. +*/ +package sprig diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go new file mode 100644 index 00000000..57fcec1d --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/functions.go @@ -0,0 +1,382 @@ +package sprig + +import ( + "errors" + "html/template" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" + + util "github.com/Masterminds/goutils" + "github.com/huandu/xstrings" + "github.com/shopspring/decimal" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. 
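+//
+// Minimal usage sketch (illustrative; template here is this package's
+// html/template import):
+//
+//	tpl := template.Must(template.New("t").Funcs(HermeticHtmlFuncMap()).Parse(`{{ upper "hi" }}`))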
+func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. +var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "abbrev": abbrev, + "abbrevboth": abbrevboth, + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "untitle": untitle, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. 
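+	// e.g. {{ "$5.00" | trimAll "$" }} renders 5.00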
+ "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + "nospace": util.DeleteWhiteSpace, + "initials": initials, + "randAlphaNum": randAlphaNumeric, + "randAlpha": randAlpha, + "randAscii": randAscii, + "randNumeric": randNumeric, + "swapcase": util.SwapCase, + "shuffle": xstrings.Shuffle, + "snakecase": xstrings.ToSnakeCase, + "camelcase": xstrings.ToCamelCase, + "kebabcase": xstrings.ToKebabCase, + "wrap": func(l int, s string) string { return util.Wrap(s, l) }, + "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
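+	// e.g. {{ add 1 2 3 }} renders 6; {{ div 7 2 }} renders 3 (integer division).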
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "add1f": func(i interface{}) float64 { + return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "addf": func(i ...interface{}) float64 { + a := interface{}(float64(0)) + return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "subf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) }) + }, + "divf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) }) + }, + "mulf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) }) + }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. + "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + "deepCopy": deepCopy, + "mustDeepCopy": mustDeepCopy, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
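+	// e.g. {{ first (list 1 2 3) }} renders 1; {{ get (dict "a" 1) "a" }} renders 1.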
+ "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, + "mergeOverwrite": mergeOverwrite, + "mustMerge": mustMerge, + "mustMergeOverwrite": mustMergeOverwrite, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Crypto: + "bcrypt": bcrypt, + "htpasswd": htpasswd, + "genPrivateKey": generatePrivateKey, + "derivePassword": derivePassword, + "buildCustomCert": buildCustomCertificate, + "genCA": generateCertificateAuthority, + "genCAWithKey": generateCertificateAuthorityWithPEMKey, + "genSelfSignedCert": generateSelfSignedCertificate, + "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey, + "genSignedCert": generateSignedCertificate, + "genSignedCertWithKey": generateSignedCertificateWithPEMKey, + "encryptAES": encryptAES, + "decryptAES": decryptAES, + "randBytes": randBytes, + + // UUIDs: + "uuidv4": uuidv4, + + // SemVer: + "semver": semver, + "semverCompare": semverCompare, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go new file mode 100644 index 00000000..ca0fbb78 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. 
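+//
+// For instance (an illustrative sketch): mustPush([]int{1, 2}, 3) copies the
+// typed slice into a new []interface{} via reflection before appending 3.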
+ +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find 
rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) + if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go new file mode 100644 index 00000000..108d78a9 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go new file mode 100644 index 00000000..f68e4182 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/numeric.go @@ -0,0 +1,186 @@ +package sprig + +import ( + "fmt" + "math" + "strconv" + "strings" + + "github.com/spf13/cast" + "github.com/shopspring/decimal" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + return cast.ToFloat64(v) +} + +func toInt(v interface{}) int { + return cast.ToInt(v) +} + +// toInt64 converts 
integer types to 64-bit integers +func toInt64(v interface{}) int64 { + return cast.ToInt64(v) +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} + +// performs a float and subsequent decimal.Decimal conversion on inputs, +// and iterates through a and b executing the mathmetical operation f +func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 { + prt := decimal.NewFromFloat(toFloat64(a)) + for _, x := range b { + dx := decimal.NewFromFloat(toFloat64(x)) + prt = f(prt, dx) + } + rslt, _ := prt.Float64() + return rslt +} diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go new file mode 100644 index 00000000..8a65c132 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. 
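+//
+// e.g. typeIs("string", "hi") is true; typeOf(42) is "int"; kindOf([]int{}) is "slice".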
+func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go new file mode 100644 index 00000000..fab55101 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go new file mode 100644 index 00000000..3fbe08aa --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/semver.go @@ -0,0 +1,23 @@ +package sprig + +import ( + sv2 "github.com/Masterminds/semver/v3" +) + +func semverCompare(constraint, version string) (bool, error) { + c, err := sv2.NewConstraint(constraint) + if err != nil { + return false, err + } + + v, err := sv2.NewVersion(version) + if err != nil { + return false, err + } + + return c.Check(v), nil +} + +func semver(version string) (*sv2.Version, error) { + return sv2.NewVersion(version) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go new file mode 100644 index 00000000..e0ae628c --- /dev/null +++ 
b/vendor/github.com/Masterminds/sprig/v3/strings.go @@ -0,0 +1,236 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + + util "github.com/Masterminds/goutils" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func abbrev(width int, s string) string { + if width < 4 { + return s + } + r, _ := util.Abbreviate(s, width) + return r +} + +func abbrevboth(left, right int, s string) string { + if right < 4 || left > 0 && right < 7 { + return s + } + r, _ := util.AbbreviateFull(s, left, right) + return r +} +func initials(s string) string { + // Wrap this just to eliminate the var args, which templates don't do well. + return util.Initials(s) +} + +func randAlphaNumeric(count int) string { + // It is not possible, it appears, to actually generate an error here. + r, _ := util.CryptoRandomAlphaNumeric(count) + return r +} + +func randAlpha(count int) string { + r, _ := util.CryptoRandomAlphabetic(count) + return r +} + +func randAscii(count int) string { + r, _ := util.CryptoRandomAscii(count) + return r +} + +func randNumeric(count int) string { + r, _ := util.CryptoRandomNumeric(count) + return r +} + +func untitle(str string) string { + return util.Uncapitalize(str) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) 
+} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. 
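+//
+// e.g. substring(1, 3, "hello") yields "el"; substring(1, -1, "hello") yields "ello".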
+// substring creates a substring of the given string.
+//
+// If start is < 0, this calls string[:end].
+//
+// If start is >= 0 and end < 0 or end is bigger than s's length, this calls string[start:]
+//
+// Otherwise, this calls string[start:end].
+func substring(start, end int, s string) string {
+    if start < 0 {
+        return s[:end]
+    }
+    if end < 0 || end > len(s) {
+        return s[start:]
+    }
+    return s[start:end]
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go
new file mode 100644
index 00000000..b8e120e1
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/url.go
@@ -0,0 +1,66 @@
+package sprig
+
+import (
+    "fmt"
+    "net/url"
+    "reflect"
+)
+
+func dictGetOrEmpty(dict map[string]interface{}, key string) string {
+    value, ok := dict[key]
+    if !ok {
+        return ""
+    }
+    tp := reflect.TypeOf(value).Kind()
+    if tp != reflect.String {
+        panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String()))
+    }
+    return reflect.ValueOf(value).String()
+}
+
+// parses given URL to return dict object
+func urlParse(v string) map[string]interface{} {
+    dict := map[string]interface{}{}
+    parsedURL, err := url.Parse(v)
+    if err != nil {
+        panic(fmt.Sprintf("unable to parse url: %s", err))
+    }
+    dict["scheme"] = parsedURL.Scheme
+    dict["host"] = parsedURL.Host
+    dict["hostname"] = parsedURL.Hostname()
+    dict["path"] = parsedURL.Path
+    dict["query"] = parsedURL.RawQuery
+    dict["opaque"] = parsedURL.Opaque
+    dict["fragment"] = parsedURL.Fragment
+    if parsedURL.User != nil {
+        dict["userinfo"] = parsedURL.User.String()
+    } else {
+        dict["userinfo"] = ""
+    }
+
+    return dict
+}
+
+// join given dict to URL string
+func urlJoin(d map[string]interface{}) string {
+    resURL := url.URL{
+        Scheme:   dictGetOrEmpty(d, "scheme"),
+        Host:     dictGetOrEmpty(d, "host"),
+        Path:     dictGetOrEmpty(d, "path"),
+        RawQuery: dictGetOrEmpty(d, "query"),
+        Opaque:   dictGetOrEmpty(d, "opaque"),
+        Fragment: dictGetOrEmpty(d, "fragment"),
+    }
+    userinfo := dictGetOrEmpty(d, "userinfo")
+    var user *url.Userinfo
+    if userinfo != "" {
+        tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo))
+        if err != nil {
+            panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err))
+        }
+        user = tempURL.User
+    }
+
+    resURL.User = user
+    return resURL.String()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go
new file mode 100644
index 00000000..dd950a28
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go
@@ -0,0 +1,50 @@
+package bearer
+
+import (
+    "github.com/aws/aws-sdk-go/aws"
+    "time"
+)
+
+// Token provides a type wrapping a bearer token and expiration metadata.
+type Token struct {
+    Value string
+
+    CanExpire bool
+    Expires   time.Time
+}
+
+// Expired returns if the token's Expires time is before or equal to the time
+// provided. If CanExpire is false, Expired will always return false.
+func (t Token) Expired(now time.Time) bool {
+    if !t.CanExpire {
+        return false
+    }
+    now = now.Round(0)
+    return now.Equal(t.Expires) || now.After(t.Expires)
+}
+
+// TokenProvider provides interface for retrieving bearer tokens.
+type TokenProvider interface {
+    RetrieveBearerToken(aws.Context) (Token, error)
+}
+
+// TokenProviderFunc provides a helper utility to wrap a function as a type
+// that implements the TokenProvider interface.
+type TokenProviderFunc func(aws.Context) (Token, error)
+
+// RetrieveBearerToken calls the wrapped function, returning the Token or
+// error.
+func (fn TokenProviderFunc) RetrieveBearerToken(ctx aws.Context) (Token, error) {
+    return fn(ctx)
+}
+
+// StaticTokenProvider provides a utility for wrapping a static bearer token
+// value within an implementation of a token provider.
+type StaticTokenProvider struct {
+    Token Token
+}
+
+// RetrieveBearerToken returns the static token specified.
+func (s StaticTokenProvider) RetrieveBearerToken(aws.Context) (Token, error) {
+    return s.Token, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
index 4818ea42..776e31b2 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -20,16 +20,16 @@ type RequestRetryer interface{}
 // A Config provides service configuration for service clients. By default,
 // all clients will use the defaults.DefaultConfig structure.
 //
-//     // Create Session with MaxRetries configuration to be shared by multiple
-//     // service clients.
-//     sess := session.Must(session.NewSession(&aws.Config{
-//         MaxRetries: aws.Int(3),
-//     }))
+//    // Create Session with MaxRetries configuration to be shared by multiple
+//    // service clients.
+//    sess := session.Must(session.NewSession(&aws.Config{
+//        MaxRetries: aws.Int(3),
+//    }))
 //
-//     // Create S3 service client with a specific Region.
-//     svc := s3.New(sess, &aws.Config{
-//         Region: aws.String("us-west-2"),
-//     })
+//    // Create S3 service client with a specific Region.
+//    svc := s3.New(sess, &aws.Config{
+//        Region: aws.String("us-west-2"),
+//    })
 type Config struct {
     // Enables verbose error printing of all credential chain errors.
     // Should be used when wanting to see all errors while attempting to
@@ -192,6 +192,23 @@ type Config struct {
     //
     EC2MetadataDisableTimeoutOverride *bool
 
+    // Set this to `false` to disable EC2Metadata client from falling back to IMDSv1.
+    // By default, EC2 role credentials will fall back to IMDSv1 as needed for backwards compatibility.
+    // You can disable this behavior by explicitly setting this flag to `false`. When false, the EC2Metadata
+    // client will return any errors encountered from attempting to fetch a token instead of silently
+    // using the insecure data flow of IMDSv1.
+    //
+    // Example:
+    //    sess := session.Must(session.NewSession(aws.NewConfig()
+    //        .WithEC2MetadataEnableFallback(false)))
+    //
+    //    svc := s3.New(sess)
+    //
+    // See [configuring IMDS] for more information.
+    //
+    // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+    EC2MetadataEnableFallback *bool
+
     // Instructs the endpoint to be generated for a service client to
     // be the dual stack endpoint. The dual stack endpoint will support
     // both IPv4 and IPv6 addressing.
@@ -283,16 +300,16 @@ type Config struct {
 // NewConfig returns a new Config pointer that can be chained with builder
 // methods to set multiple configuration values inline without using pointers.
 //
-//     // Create Session with MaxRetries configuration to be shared by multiple
-//     // service clients.
-//     sess := session.Must(session.NewSession(aws.NewConfig().
-//         WithMaxRetries(3),
-//     ))
+//    // Create Session with MaxRetries configuration to be shared by multiple
+//    // service clients.
+//    sess := session.Must(session.NewSession(aws.NewConfig().
+//        WithMaxRetries(3),
+//    ))
 //
-//     // Create S3 service client with a specific Region.
-//     svc := s3.New(sess, aws.NewConfig().
-//         WithRegion("us-west-2"),
-//     )
+//    // Create S3 service client with a specific Region.
+//    svc := s3.New(sess, aws.NewConfig().
+//        WithRegion("us-west-2"),
+//    )
 func NewConfig() *Config {
     return &Config{}
 }
@@ -432,6 +449,13 @@ func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
     return c
 }
 
+// WithEC2MetadataEnableFallback sets a config EC2MetadataEnableFallback value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataEnableFallback(v bool) *Config {
+    c.EC2MetadataEnableFallback = &v
+    return c
+}
+
 // WithSleepDelay overrides the function used to sleep while waiting for the
 // next retry. Defaults to time.Sleep.
 func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
@@ -576,6 +600,10 @@ func mergeInConfig(dst *Config, other *Config) {
         dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
     }
 
+    if other.EC2MetadataEnableFallback != nil {
+        dst.EC2MetadataEnableFallback = other.EC2MetadataEnableFallback
+    }
+
     if other.SleepDelay != nil {
         dst.SleepDelay = other.SleepDelay
     }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
index 6eda2a55..4138e725 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
@@ -4,13 +4,13 @@ import (
     "crypto/sha1"
     "encoding/hex"
     "encoding/json"
-    "fmt"
     "io/ioutil"
     "path/filepath"
     "strings"
     "time"
 
     "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/auth/bearer"
     "github.com/aws/aws-sdk-go/aws/awserr"
     "github.com/aws/aws-sdk-go/aws/client"
     "github.com/aws/aws-sdk-go/aws/credentials"
@@ -55,6 +55,19 @@ type Provider struct {
     // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal.
     StartURL string
+
+    // The filepath the cached token will be retrieved from. If unset Provider will
+    // use the startURL to determine the filepath at.
+    //
+    //    ~/.aws/sso/cache/<sha1 hash of startURL>.json
+    //
+    // If custom cached token filepath is used, the Provider's startUrl
+    // parameter will be ignored.
+    CachedTokenFilepath string
+
+    // Used by the SSOCredentialProvider if a token configuration
+    // profile is used in the shared config
+    TokenProvider bearer.TokenProvider
 }
 
 // NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured
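For orientation between the two hunks, a hedged sketch of how a caller might set these new Provider fields; the account ID, role name, start URL, and cache path below are placeholders, not values from this diff:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials/ssocreds"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession())

    creds := ssocreds.NewCredentials(sess, "123456789012", "ReadOnlyRole",
        "https://example.awsapps.com/start", func(p *ssocreds.Provider) {
            // Point the provider at a custom token cache; when set, the
            // start URL is no longer used to derive the cache path.
            p.CachedTokenFilepath = "/tmp/example-sso-token.json" // placeholder path
        })

    svc := s3.New(sess, &aws.Config{Credentials: creds})
    fmt.Println("S3 client configured:", svc != nil)
}
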
@@ -89,13 +102,31 @@ func (p *Provider) Retrieve() (credentials.Value, error) {
 
 // RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
 // by exchanging the accessToken present in ~/.aws/sso/cache.
 func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
-    tokenFile, err := loadTokenFile(p.StartURL)
-    if err != nil {
-        return credentials.Value{}, err
+    var accessToken *string
+    if p.TokenProvider != nil {
+        token, err := p.TokenProvider.RetrieveBearerToken(ctx)
+        if err != nil {
+            return credentials.Value{}, err
+        }
+        accessToken = &token.Value
+    } else {
+        if p.CachedTokenFilepath == "" {
+            cachedTokenFilePath, err := getCachedFilePath(p.StartURL)
+            if err != nil {
+                return credentials.Value{}, err
+            }
+            p.CachedTokenFilepath = cachedTokenFilePath
+        }
+
+        tokenFile, err := loadTokenFile(p.CachedTokenFilepath)
+        if err != nil {
+            return credentials.Value{}, err
+        }
+        accessToken = &tokenFile.AccessToken
     }
 
     output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{
-        AccessToken: &tokenFile.AccessToken,
+        AccessToken: accessToken,
         AccountId:   &p.AccountID,
         RoleName:    &p.RoleName,
     })
@@ -114,32 +145,13 @@ func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Val
     }, nil
 }
 
-func getCacheFileName(url string) (string, error) {
+func getCachedFilePath(startUrl string) (string, error) {
     hash := sha1.New()
-    _, err := hash.Write([]byte(url))
+    _, err := hash.Write([]byte(startUrl))
     if err != nil {
         return "", err
     }
-    return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil
-}
-
-type rfc3339 time.Time
-
-func (r *rfc3339) UnmarshalJSON(bytes []byte) error {
-    var value string
-
-    if err := json.Unmarshal(bytes, &value); err != nil {
-        return err
-    }
-
-    parse, err := time.Parse(time.RFC3339, value)
-    if err != nil {
-        return fmt.Errorf("expected RFC3339 timestamp: %v", err)
-    }
-
-    *r = rfc3339(parse)
-
-    return nil
+    return filepath.Join(defaultCacheLocation(), strings.ToLower(hex.EncodeToString(hash.Sum(nil)))+".json"), nil
 }
 
 type token struct {
@@ -153,13 +165,8 @@ func (t token) Expired() bool {
     return nowTime().Round(0).After(time.Time(t.ExpiresAt))
 }
 
-func loadTokenFile(startURL string) (t token, err error) {
-    key, err := getCacheFileName(startURL)
-    if err != nil {
-        return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
-    }
-
-    fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key))
+func loadTokenFile(cachedTokenPath string) (t token, err error) {
+    fileBytes, err := ioutil.ReadFile(cachedTokenPath)
     if err != nil {
         return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
     }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go
new file mode 100644
index 00000000..f6fa8845
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go
@@ -0,0 +1,237 @@
+package ssocreds
+
+import (
+    "crypto/sha1"
+    "encoding/hex"
+    "encoding/json"
+    "fmt"
+    "github.com/aws/aws-sdk-go/internal/shareddefaults"
+    "io/ioutil"
+    "os"
+    "path/filepath"
+    "strconv"
+    "strings"
+    "time"
+)
+
+var resolvedOsUserHomeDir = shareddefaults.UserHomeDir
+
+// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or
+// an error if unable to derive the path. The key will be used to compute a SHA1
+// value that is hex encoded.
+//
+// Derives the filepath using the Key as:
+//
+//    ~/.aws/sso/cache/<sha1 hash of key>.json
+func StandardCachedTokenFilepath(key string) (string, error) {
+    homeDir := resolvedOsUserHomeDir()
+    if len(homeDir) == 0 {
+        return "", fmt.Errorf("unable to get USER's home directory for cached token")
+    }
+    hash := sha1.New()
+    if _, err := hash.Write([]byte(key)); err != nil {
+        return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %v", err)
+    }
+
+    cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json"
+
+    return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil
+}
+
+type tokenKnownFields struct {
+    AccessToken string   `json:"accessToken,omitempty"`
+    ExpiresAt   *rfc3339 `json:"expiresAt,omitempty"`
+
+    RefreshToken string `json:"refreshToken,omitempty"`
+    ClientID     string `json:"clientId,omitempty"`
+    ClientSecret string `json:"clientSecret,omitempty"`
+}
+
+type cachedToken struct {
+    tokenKnownFields
+    UnknownFields map[string]interface{} `json:"-"`
+}
+
+// MarshalJSON provides custom marshalling because the standard library Go marshaller ignores unknown/unspecified fields
+// when marshalling from a struct: https://pkg.go.dev/encoding/json#Marshal
+// This function adds some extra validation to the known fields and captures unknown fields.
+func (t cachedToken) MarshalJSON() ([]byte, error) {
+    fields := map[string]interface{}{}
+
+    setTokenFieldString(fields, "accessToken", t.AccessToken)
+    setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt)
+
+    setTokenFieldString(fields, "refreshToken", t.RefreshToken)
+    setTokenFieldString(fields, "clientId", t.ClientID)
+    setTokenFieldString(fields, "clientSecret", t.ClientSecret)
+
+    for k, v := range t.UnknownFields {
+        if _, ok := fields[k]; ok {
+            return nil, fmt.Errorf("unknown token field %v, duplicates known field", k)
+        }
+        fields[k] = v
+    }
+
+    return json.Marshal(fields)
+}
+
+func setTokenFieldString(fields map[string]interface{}, key, value string) {
+    if value == "" {
+        return
+    }
+    fields[key] = value
+}
+
+func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) {
+    if value == nil {
+        return
+    }
+    fields[key] = value
+}
+
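The marshal/unmarshal pair around cachedToken is a reusable pattern: keep the known fields typed, spill everything else into a map, and the file round-trips losslessly even when other tools add keys. A standalone miniature of the same idea — the note type and its field are invented for illustration:

package main

import (
    "encoding/json"
    "fmt"
)

// note keeps one typed known field plus a map capturing unmodeled keys.
type note struct {
    Text    string
    Unknown map[string]interface{}
}

func (n *note) UnmarshalJSON(b []byte) error {
    var fields map[string]interface{}
    if err := json.Unmarshal(b, &fields); err != nil {
        return err
    }
    n.Unknown = map[string]interface{}{}
    for k, v := range fields {
        if k == "text" {
            s, ok := v.(string)
            if !ok {
                return fmt.Errorf("field %q: expect string, got %T", k, v)
            }
            n.Text = s
            continue
        }
        n.Unknown[k] = v // unknown key: preserve as-is
    }
    return nil
}

func (n note) MarshalJSON() ([]byte, error) {
    fields := map[string]interface{}{"text": n.Text}
    for k, v := range n.Unknown {
        if _, ok := fields[k]; ok {
            return nil, fmt.Errorf("unknown field %v duplicates known field", k)
        }
        fields[k] = v
    }
    return json.Marshal(fields)
}

func main() {
    var n note
    _ = json.Unmarshal([]byte(`{"text":"hi","extra":42}`), &n)
    out, _ := json.Marshal(n)
    fmt.Println(string(out)) // {"extra":42,"text":"hi"}
}
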
+// UnmarshalJSON provides custom unmarshalling because the standard library Go unmarshaller ignores unknown/unspecified
+// fields when unmarshalling from a struct: https://pkg.go.dev/encoding/json#Unmarshal
+// This function adds some extra validation to the known fields and captures unknown fields.
+func (t *cachedToken) UnmarshalJSON(b []byte) error {
+    var fields map[string]interface{}
+    if err := json.Unmarshal(b, &fields); err != nil {
+        return err
+    }
+
+    t.UnknownFields = map[string]interface{}{}
+
+    for k, v := range fields {
+        var err error
+        switch k {
+        case "accessToken":
+            err = getTokenFieldString(v, &t.AccessToken)
+        case "expiresAt":
+            err = getTokenFieldRFC3339(v, &t.ExpiresAt)
+        case "refreshToken":
+            err = getTokenFieldString(v, &t.RefreshToken)
+        case "clientId":
+            err = getTokenFieldString(v, &t.ClientID)
+        case "clientSecret":
+            err = getTokenFieldString(v, &t.ClientSecret)
+        default:
+            t.UnknownFields[k] = v
+        }
+
+        if err != nil {
+            return fmt.Errorf("field %q, %v", k, err)
+        }
+    }
+
+    return nil
+}
+
+func getTokenFieldString(v interface{}, value *string) error {
+    var ok bool
+    *value, ok = v.(string)
+    if !ok {
+        return fmt.Errorf("expect value to be string, got %T", v)
+    }
+    return nil
+}
+
+func getTokenFieldRFC3339(v interface{}, value **rfc3339) error {
+    var stringValue string
+    if err := getTokenFieldString(v, &stringValue); err != nil {
+        return err
+    }
+
+    timeValue, err := parseRFC3339(stringValue)
+    if err != nil {
+        return err
+    }
+
+    *value = &timeValue
+    return nil
+}
+
+func loadCachedToken(filename string) (cachedToken, error) {
+    fileBytes, err := ioutil.ReadFile(filename)
+    if err != nil {
+        return cachedToken{}, fmt.Errorf("failed to read cached SSO token file, %v", err)
+    }
+
+    var t cachedToken
+    if err := json.Unmarshal(fileBytes, &t); err != nil {
+        return cachedToken{}, fmt.Errorf("failed to parse cached SSO token file, %v", err)
+    }
+
+    if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() {
+        return cachedToken{}, fmt.Errorf(
+            "cached SSO token must contain accessToken and expiresAt fields")
+    }
+
+    return t, nil
+}
+
+func storeCachedToken(filename string, t cachedToken, fileMode os.FileMode) (err error) {
+    tmpFilename := filename + ".tmp-" + strconv.FormatInt(nowTime().UnixNano(), 10)
+    if err := writeCacheFile(tmpFilename, fileMode, t); err != nil {
+        return err
+    }
+
+    if err := os.Rename(tmpFilename, filename); err != nil {
+        return fmt.Errorf("failed to replace old cached SSO token file, %v", err)
+    }
+
+    return nil
+}
+
+func writeCacheFile(filename string, fileMode os.FileMode, t cachedToken) (err error) {
+    var f *os.File
+    f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode)
+    if err != nil {
+        return fmt.Errorf("failed to create cached SSO token file %v", err)
+    }
+
+    defer func() {
+        closeErr := f.Close()
+        if err == nil && closeErr != nil {
+            err = fmt.Errorf("failed to close cached SSO token file, %v", closeErr)
+        }
+    }()
+
+    encoder := json.NewEncoder(f)
+
+    if err = encoder.Encode(t); err != nil {
+        return fmt.Errorf("failed to serialize cached SSO token, %v", err)
+    }
+
+    return nil
+}
+
+type rfc3339 time.Time
+
+// UnmarshalJSON decodes rfc3339 from JSON format
+func (r *rfc3339) UnmarshalJSON(bytes []byte) error {
+    var value string
+    var err error
+
+    if err = json.Unmarshal(bytes, &value); err != nil {
+        return err
+    }
+
+    *r, err = parseRFC3339(value)
+    return err
+}
+
+func parseRFC3339(v string) (rfc3339, error) {
+    parsed, err := time.Parse(time.RFC3339, v)
+    if err != nil {
+        return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %v", err)
+    }
+
+    return rfc3339(parsed), nil
+}
+
+// MarshalJSON encodes rfc3339 to JSON format time
+func (r *rfc3339) MarshalJSON() ([]byte, error) {
+    value := time.Time(*r).Format(time.RFC3339)
+
+    // Use json.Marshal to produce the quoted value, applying JSON's
+    // quoting rules.
+    return json.Marshal(value)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go
new file mode 100644
index 00000000..7562cd01
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go
@@ -0,0 +1,139 @@
+package ssocreds
+
+import (
+    "fmt"
+    "os"
+    "time"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/auth/bearer"
+    "github.com/aws/aws-sdk-go/service/ssooidc"
+)
+
+// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API
+// client for calling CreateToken operation to refresh the SSO token.
+type CreateTokenAPIClient interface {
+    CreateToken(input *ssooidc.CreateTokenInput) (*ssooidc.CreateTokenOutput, error)
+}
+
+// SSOTokenProviderOptions provides the options for configuring the
+// SSOTokenProvider.
+type SSOTokenProviderOptions struct {
+    // Client that can be overridden
+    Client CreateTokenAPIClient
+
+    // The path of the file the cached SSO token will be read from.
+    // Initialized from the NewSSOTokenProvider's cachedTokenFilepath parameter.
+    CachedTokenFilepath string
+}
+
+// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for
+// Bearer Authentication. The SSOTokenProvider can only be used to refresh
+// already cached SSO Tokens. This utility cannot perform the initial SSO
+// create token.
+//
+// The initial SSO create token should be performed with the AWS CLI before the
+// Go application using the SSOTokenProvider will need to retrieve the SSO
+// token. If the AWS CLI has not created the token cache file, this provider
+// will return an error when attempting to retrieve the cached token.
+//
+// This provider will attempt to refresh the cached SSO token periodically if
+// needed when RetrieveBearerToken is called.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+type SSOTokenProvider struct {
+    options SSOTokenProviderOptions
+}
+
+// NewSSOTokenProvider returns an initialized SSOTokenProvider that will
+// periodically refresh the cached SSO token stored in the cachedTokenFilepath.
+// The cachedTokenFilepath file's content will be rewritten by the token
+// provider when the token is refreshed.
+//
+// The client must be configured for the AWS region the SSO token was created for.
+func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider {
+    options := SSOTokenProviderOptions{
+        Client:              client,
+        CachedTokenFilepath: cachedTokenFilepath,
+    }
+    for _, fn := range optFns {
+        fn(&options)
+    }
+
+    provider := &SSOTokenProvider{
+        options: options,
+    }
+
+    return provider
+}
+
+// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath
+// the SSOTokenProvider was created with. If the token has expired,
+// RetrieveBearerToken will attempt to refresh it. If the token cannot be
+// refreshed or is not present an error will be returned.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+func (p *SSOTokenProvider) RetrieveBearerToken(ctx aws.Context) (bearer.Token, error) {
+    cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath)
+    if err != nil {
+        return bearer.Token{}, err
+    }
+
+    if cachedToken.ExpiresAt != nil && nowTime().After(time.Time(*cachedToken.ExpiresAt)) {
+        cachedToken, err = p.refreshToken(cachedToken)
+        if err != nil {
+            return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %v", err)
+        }
+    }
+
+    expiresAt := toTime((*time.Time)(cachedToken.ExpiresAt))
+    return bearer.Token{
+        Value:     cachedToken.AccessToken,
+        CanExpire: !expiresAt.IsZero(),
+        Expires:   expiresAt,
+    }, nil
+}
+
+func (p *SSOTokenProvider) refreshToken(token cachedToken) (cachedToken, error) {
+    if token.ClientSecret == "" || token.ClientID == "" || token.RefreshToken == "" {
+        return cachedToken{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed")
+    }
+
+    createResult, err := p.options.Client.CreateToken(&ssooidc.CreateTokenInput{
+        ClientId:     &token.ClientID,
+        ClientSecret: &token.ClientSecret,
+        RefreshToken: &token.RefreshToken,
+        GrantType:    aws.String("refresh_token"),
+    })
+    if err != nil {
+        return cachedToken{}, fmt.Errorf("unable to refresh SSO token, %v", err)
+    }
+
+    expiresAt := nowTime().Add(time.Duration(*createResult.ExpiresIn) * time.Second)
+
+    token.AccessToken = *createResult.AccessToken
+    token.ExpiresAt = (*rfc3339)(&expiresAt)
+    token.RefreshToken = *createResult.RefreshToken
+
+    fileInfo, err := os.Stat(p.options.CachedTokenFilepath)
+    if err != nil {
+        return cachedToken{}, fmt.Errorf("failed to stat cached SSO token file %v", err)
+    }
+
+    if err = storeCachedToken(p.options.CachedTokenFilepath, token, fileInfo.Mode()); err != nil {
+        return cachedToken{}, fmt.Errorf("unable to cache refreshed SSO token, %v", err)
+    }
+
+    return token, nil
+}
+
+func toTime(p *time.Time) (v time.Time) {
+    if p == nil {
+        return v
+    }
+
+    return *p
+}
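Tying the pieces above together: the ssooidc client satisfies CreateTokenAPIClient, and StandardCachedTokenFilepath locates the CLI-written cache file. A hedged sketch of retrieving (and, if expired, refreshing) a bearer token — the session name and region below are placeholders:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials/ssocreds"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
    // The region must match the one the SSO token was created in.
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))

    // "my-sso-session" is illustrative; the AWS CLI creates the cache file.
    cachePath, err := ssocreds.StandardCachedTokenFilepath("my-sso-session")
    if err != nil {
        fmt.Println("cannot derive cache path:", err)
        return
    }

    tp := ssocreds.NewSSOTokenProvider(ssooidc.New(sess), cachePath)
    tok, err := tp.RetrieveBearerToken(aws.BackgroundContext())
    if err != nil {
        fmt.Println("retrieve/refresh failed:", err)
        return
    }
    fmt.Println("token expires:", tok.Expires, "can expire:", tok.CanExpire)
}
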
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
index 260a37cb..86db488d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -9,7 +9,7 @@ to refresh the credentials will be synchronized. But, the SDK is unable to
 ensure synchronous usage of the AssumeRoleProvider if the value is shared
 between multiple Credentials, Sessions or service clients.
 
-Assume Role
+# Assume Role
 
 To assume an IAM role using STS with the SDK you can create a new Credentials
 with the SDK's stscreds package.
@@ -27,7 +27,7 @@ with the SDK's stscreds package.
     // from assumed role.
     svc := s3.New(sess, &aws.Config{Credentials: creds})
 
-Assume Role with static MFA Token
+# Assume Role with static MFA Token
 
 To assume an IAM role with an MFA token you can either specify an MFA token code
 directly or provide a function to prompt the user each time the credentials
@@ -49,7 +49,7 @@ credentials.
     // from assumed role.
     svc := s3.New(sess, &aws.Config{Credentials: creds})
 
-Assume Role with MFA Token Provider
+# Assume Role with MFA Token Provider
 
 To assume an IAM role with MFA for longer running tasks where the credentials
 may need to be refreshed setting the TokenProvider field of AssumeRoleProvider
@@ -74,7 +74,6 @@ single Credentials with an AssumeRoleProvider can be shared safely.
     // Create service client value configured for credentials
     // from assumed role.
     svc := s3.New(sess, &aws.Config{Credentials: creds})
-
 */
 package stscreds
@@ -199,6 +198,10 @@ type AssumeRoleProvider struct {
     // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
     SerialNumber *string
 
+    // The SourceIdentity which is used to identify a persistent identity through the whole session.
+    // For more details see https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+    SourceIdentity *string
+
     // The value provided by the MFA device, if the trust policy of the role being
     // assumed requires MFA (that is, if the policy includes a condition that tests
     // for MFA). If the role being assumed requires MFA and if the TokenCode value
@@ -320,6 +323,7 @@ func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (crede
         Tags:              p.Tags,
         PolicyArns:        p.PolicyArns,
         TransitiveTagKeys: p.TransitiveTagKeys,
+        SourceIdentity:    p.SourceIdentity,
     }
     if p.Policy != nil {
         input.Policy = p.Policy
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
index df63bade..f4cc8751 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -57,13 +57,13 @@ type EC2Metadata struct {
 // New creates a new instance of the EC2Metadata client with a session.
 // This client is safe to use across multiple goroutines.
 //
-// // Example:
-//     // Create a EC2Metadata client from just a session.
-//     svc := ec2metadata.New(mySession)
 //
-//     // Create a EC2Metadata client with additional configuration
-//     svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+//    // Create a EC2Metadata client from just a session.
+//    svc := ec2metadata.New(mySession)
+//
+//    // Create a EC2Metadata client with additional configuration
+//    svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
 func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
     c := p.ClientConfig(ServiceName, cfgs...)
     return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
index 4b29f190..604aeffd 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
@@ -1,6 +1,7 @@
 package ec2metadata
 
 import (
+    "fmt"
     "net/http"
     "sync/atomic"
     "time"
@@ -33,11 +34,15 @@ func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider {
     return &tokenProvider{client: c, configuredTTL: duration}
 }
 
+// fallbackEnabled reports whether fallback to IMDSv1 is enabled.
+func (t *tokenProvider) fallbackEnabled() bool {
+    return t.client.Config.EC2MetadataEnableFallback == nil || *t.client.Config.EC2MetadataEnableFallback
+}
+
 // fetchTokenHandler fetches token for EC2Metadata service client by default.
 func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
-    // short-circuits to insecure data flow if tokenProvider is disabled.
-    if v := atomic.LoadUint32(&t.disabled); v == 1 {
+    if v := atomic.LoadUint32(&t.disabled); v == 1 && t.fallbackEnabled() {
         return
     }
 
@@ -49,23 +54,21 @@ func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
 
     output, err := t.client.getToken(r.Context(), t.configuredTTL)
 
     if err != nil {
+        // only attempt fallback to insecure data flow if IMDSv1 is enabled
+        if !t.fallbackEnabled() {
+            r.Error = awserr.New("EC2MetadataError", "failed to get IMDSv2 token and fallback to IMDSv1 is disabled", err)
+            return
+        }
 
-        // change the disabled flag on token provider to true,
-        // when error is request timeout error.
+        // change the disabled flag on token provider to true and fallback
         if requestFailureError, ok := err.(awserr.RequestFailure); ok {
             switch requestFailureError.StatusCode() {
             case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed:
                 atomic.StoreUint32(&t.disabled, 1)
+                t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError))
             case http.StatusBadRequest:
                 r.Error = requestFailureError
             }
-
-            // Check if request timed out while waiting for response
-            if e, ok := requestFailureError.OrigErr().(awserr.Error); ok {
-                if e.Code() == request.ErrCodeRequestError {
-                    atomic.StoreUint32(&t.disabled, 1)
-                }
-            }
         }
         return
     }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 3e396bb3..40e02986 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -13,6 +13,8 @@ const (
     AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
     AwsIsoPartitionID   = "aws-iso"    // AWS ISO (US) partition.
     AwsIsoBPartitionID  = "aws-iso-b"  // AWS ISOB (US) partition.
+    AwsIsoEPartitionID  = "aws-iso-e"  // AWS ISOE (Europe) partition.
+    AwsIsoFPartitionID  = "aws-iso-f"  // AWS ISOF partition.
 )
 
 // AWS Standard partition's regions.
@@ -37,6 +39,7 @@ const (
     EuWest1RegionID      = "eu-west-1"      // Europe (Ireland).
     EuWest2RegionID      = "eu-west-2"      // Europe (London).
     EuWest3RegionID      = "eu-west-3"      // Europe (Paris).
+    IlCentral1RegionID   = "il-central-1"   // Israel (Tel Aviv).
     MeCentral1RegionID   = "me-central-1"   // Middle East (UAE).
     MeSouth1RegionID     = "me-south-1"     // Middle East (Bahrain).
     SaEast1RegionID      = "sa-east-1"      // South America (Sao Paulo).
@@ -69,8 +72,14 @@ const (
     UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio).
 )
 
+// AWS ISOE (Europe) partition's regions.
+const ()
+
+// AWS ISOF partition's regions.
+const ()
+
 // DefaultResolver returns an Endpoint resolver that will be able
-// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
+// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF.
 //
 // Use DefaultPartitions() to get the list of the default partitions.
 func DefaultResolver() Resolver {
@@ -78,7 +87,7 @@ }
 
 // DefaultPartitions returns a list of the partitions the SDK is bundled
-// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
+// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF.
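Before the region and endpoint metadata churn below, a sketch of what the new IMDS fallback switch looks like from application code; the region lookup is just a convenient metadata call to force a token fetch:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/ec2metadata"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    // With fallback disabled, a failed IMDSv2 token fetch surfaces as an
    // error instead of silently retrying over insecure IMDSv1.
    sess := session.Must(session.NewSession(aws.NewConfig().
        WithEC2MetadataEnableFallback(false)))

    svc := ec2metadata.New(sess)
    region, err := svc.Region()
    if err != nil {
        fmt.Println("IMDSv2 required but unavailable:", err)
        return
    }
    fmt.Println("instance region:", region)
}
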
// // partitions := endpoints.DefaultPartitions // for _, p := range partitions { @@ -94,6 +103,8 @@ var defaultPartitions = partitions{ awsusgovPartition, awsisoPartition, awsisobPartition, + awsisoePartition, + awsisofPartition, } // AwsPartition returns the Resolver for AWS Standard. @@ -107,7 +118,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$") return reg }(), }, @@ -203,6 +214,9 @@ var awsPartition = partition{ "eu-west-3": region{ Description: "Europe (Paris)", }, + "il-central-1": region{ + Description: "Israel (Tel Aviv)", + }, "me-central-1": region{ Description: "Middle East (UAE)", }, @@ -346,6 +360,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -484,6 +501,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -592,6 +612,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -601,6 +624,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -613,12 +639,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -856,6 +888,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -911,6 +946,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -1411,6 +1449,14 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "api.ecr.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -1837,6 +1883,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1846,18 +1895,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -1867,6 +1925,9 @@ var awsPartition = partition{ endpointKey{ Region: 
"eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2047,6 +2108,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -2204,6 +2268,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2347,6 +2414,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2390,24 +2460,39 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2417,6 +2502,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -2556,6 +2647,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2599,6 +2693,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -2614,12 +2711,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2629,6 +2732,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3139,6 +3245,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3154,12 +3263,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3169,6 +3284,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3199,6 
+3317,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3217,6 +3341,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -3230,9 +3360,27 @@ var awsPartition = partition{ }, "arc-zonal-shift": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3242,21 +3390,57 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -3318,6 +3502,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.ap-south-1.api.aws", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3345,6 +3538,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.ap-southeast-3.api.aws", }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3363,6 +3565,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.eu-central-1.api.aws", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -3381,6 +3592,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.eu-south-1.api.aws", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-south-2.api.aws", + }, endpointKey{ Region: 
"eu-west-1", }: endpoint{}, @@ -3444,6 +3664,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3477,6 +3706,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, @@ -3492,6 +3727,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, @@ -3507,6 +3748,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -3522,6 +3769,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-2.api.aws", + }, }, }, "auditmanager": service{ @@ -3631,6 +3884,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -3749,6 +4005,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3758,18 +4017,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3870,30 +4138,6 @@ var awsPartition = partition{ }, }, "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "batch": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.{region}.{dnsSuffix}", - }, - }, Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -3913,6 +4157,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3922,18 +4169,120 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: 
"ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "batch": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -4461,6 +4810,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4601,6 +4953,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4766,6 +5121,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4921,6 +5279,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -5101,6 +5462,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -5110,18 +5474,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: 
endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5131,6 +5504,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -5447,6 +5823,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -5566,6 +5945,9 @@ var awsPartition = partition{ }, "codepipeline": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -5596,6 +5978,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -5656,6 +6041,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -5766,6 +6154,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5906,6 +6297,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -5942,6 +6342,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -6538,6 +6944,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -6611,12 +7020,42 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "connect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "connect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-fips.us-west-2.amazonaws.com", + }, }, }, "connect-campaigns": service{ @@ -6676,6 +7115,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, @@ -6698,12 +7143,21 @@ var awsPartition = partition{ }, "controltower": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -6713,6 +7167,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6737,6 +7194,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6746,6 +7206,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -6785,6 +7248,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -7329,6 +7810,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7529,6 +8013,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -7544,6 +8034,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -7562,6 +8061,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "devops-guru-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -7595,6 +8103,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -7704,6 +8218,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -7796,6 +8313,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7947,6 +8467,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: 
"me-central-1", }: endpoint{}, @@ -8166,6 +8689,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8175,18 +8701,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8196,6 +8731,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8248,6 +8786,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -8326,6 +8867,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -8455,6 +8999,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "local", }: endpoint{ @@ -8660,6 +9207,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -8837,6 +9387,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9006,6 +9559,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9118,6 +9674,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9181,6 +9740,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9299,6 +9861,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9468,6 +10033,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -9604,6 +10172,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9775,6 +10352,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: 
"elasticfilesystem-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -9856,6 +10442,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-central-1", }: endpoint{ @@ -9919,6 +10514,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -10087,6 +10691,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -10255,6 +10862,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -10432,6 +11042,12 @@ var awsPartition = partition{ }, "emr-containers": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -10462,6 +11078,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -10516,6 +11135,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -10559,6 +11181,9 @@ var awsPartition = partition{ }, "emr-serverless": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -10643,6 +11268,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -10769,6 +11397,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -10950,6 +11581,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -11116,6 +11750,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11179,6 +11816,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -11281,6 +11921,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fms-fips.ap-south-1.amazonaws.com", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -11302,6 +11945,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11320,6 +11966,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fms-fips.eu-central-1.amazonaws.com", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -11332,6 +11981,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fms-fips.eu-south-1.amazonaws.com", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -11789,6 +12441,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -11798,6 +12453,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11810,12 +12468,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -12342,6 +13006,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -12351,18 +13018,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -12408,6 +13084,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -12565,6 +13244,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -12574,15 +13259,69 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "greengrass-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "greengrass-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "greengrass-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "greengrass-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: 
"us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-west-2.amazonaws.com", + }, }, }, "groundstation": service{ @@ -12708,6 +13447,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -12872,6 +13614,9 @@ var awsPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -12979,6 +13724,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -13021,6 +13769,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -13030,6 +13781,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -13391,6 +14145,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "internetmonitor.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -13431,6 +14191,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "internetmonitor.eu-west-3.api.aws", }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "internetmonitor.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -13451,31 +14216,48 @@ var awsPartition = partition{ }: endpoint{ Hostname: "internetmonitor.us-east-1.api.aws", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{ Hostname: "internetmonitor.us-east-2.api.aws", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{ Hostname: "internetmonitor.us-west-1.api.aws", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{ Hostname: "internetmonitor.us-west-2.api.aws", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-2.api.aws", + }, }, }, "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, Endpoints: serviceEndpoints{ endpointKey{ Region: "ap-east-1", @@ -13523,45 +14305,35 @@ var awsPartition = partition{ Region: "fips-ca-central-1", }: endpoint{ Hostname: "iot-fips.ca-central-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ Hostname: "iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-east-2", }: endpoint{ Hostname: "iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-west-1", }: endpoint{ Hostname: "iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ Hostname: "iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ @@ -14206,18 +14978,176 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "api-ap-southeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "api-ap-southeast-2", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "api-eu-central-1", + }: endpoint{ + Hostname: "api.iottwinmaker.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "api-eu-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "data-ap-southeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "data-ap-southeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "data-eu-central-1", + }: endpoint{ + Hostname: "data.iottwinmaker.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "data-eu-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "fips-api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ 
+ Region: "fips-api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", + }, }, }, "iotwireless": service{ @@ -14314,6 +15244,31 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "ivsrealtime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "kafka": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -14334,6 +15289,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -14343,18 +15301,33 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14364,6 +15337,51 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kafka-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kafka-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -14376,15 +15394,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", + }, }, }, "kafkaconnect": service{ @@ -14459,6 +15501,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -14588,6 +15633,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{ @@ -14618,6 +15669,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking.eu-west-3.api.aws", }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "kendra-ranking.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -14638,11 +15694,23 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking.us-east-1.api.aws", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{ Hostname: "kendra-ranking.us-east-2.api.aws", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{ @@ -14653,6 +15721,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking.us-west-2.api.aws", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-west-2.api.aws", + }, }, }, "kinesis": service{ @@ -14753,6 +15827,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -14820,6 +15897,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -14829,18 +15909,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, 
endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14850,6 +15939,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -15296,6 +16388,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + }, + endpointKey{ + Region: "il-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -15444,6 +16554,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -15453,18 +16566,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -15510,6 +16632,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -15772,6 +16900,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -15993,6 +17130,12 @@ var awsPartition = partition{ }, "license-manager-linux-subscriptions": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -16005,21 +17148,39 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -16065,6 +17226,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + 
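Many rows above (for example the kms il-central-1 block) keep a legacy `<region>-fips` pseudo-region key alongside the variant entry, flagged Deprecated: boxedTrue. The pseudo-region still resolves by name, which is why these keys are retained rather than dropped; a sketch under the same vendored-SDK assumption as before:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Legacy pseudo-region spelling: still a key in the generated
	// table, but marked Deprecated: boxedTrue.
	legacy, err := endpoints.DefaultResolver().EndpointFor("kms", "il-central-1-fips")
	if err != nil {
		panic(err)
	}
	// Modern spelling: the real region plus the FIPS variant option.
	modern, err := endpoints.DefaultResolver().EndpointFor(
		"kms", "il-central-1",
		func(o *endpoints.Options) {
			o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
		},
	)
	if err != nil {
		panic(err)
	}
	// Both paths land on kms-fips.il-central-1.amazonaws.com.
	fmt.Println(legacy.URL == modern.URL)
}
```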
endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -16377,6 +17547,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -16846,6 +18019,9 @@ var awsPartition = partition{ }, "mediaconnect": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -16855,6 +18031,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -16864,6 +18043,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17134,6 +18316,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17183,6 +18368,61 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediapackagev2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17452,6 +18692,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -17495,6 +18738,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -17504,24 +18750,39 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + 
endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -17590,6 +18851,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -17599,18 +18863,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -17656,6 +18929,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -17970,6 +19246,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -18037,6 +19316,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -18046,18 +19328,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18331,6 +19622,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -18340,6 +19634,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18352,12 +19649,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18478,18 +19781,33 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -18557,6 +19875,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + 
endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -18760,6 +20081,94 @@ var awsPartition = partition{ }, }, }, + "omics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "omics.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "omics.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "omics.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "omics.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "omics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "omics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "omics.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "omics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "omics.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "omics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "opsworks": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -18872,6 +20281,40 @@ var awsPartition = partition{ }, }, }, + "osis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "outposts": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -19180,6 +20623,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -19429,6 +20875,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -19736,18 +21185,63 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + }, 
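The new omics block above is one of the few services whose entries spell out an explicit per-region Hostname together with a CredentialScope; that scope surfaces as signing metadata on the resolved endpoint rather than changing the URL. A hedged sketch, again assuming the aws-sdk-go v1 endpoints API:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	ep, err := endpoints.DefaultResolver().EndpointFor("omics", "us-east-1")
	if err != nil {
		panic(err)
	}
	// URL comes from the entry's Hostname field; SigningRegion is
	// filled from CredentialScope.Region in the generated data.
	fmt.Printf("%s signed for %s/%s\n", ep.URL, ep.SigningRegion, ep.SigningName)
}
```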
endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "profile-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.us-west-2.amazonaws.com", + }, }, }, "projects.iot1click": service{ @@ -20071,6 +21565,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -20231,6 +21728,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -20355,6 +21855,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -20795,6 +22298,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -20850,12 +22356,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -20868,12 +22380,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -21209,16 +22727,6 @@ var awsPartition = partition{ }, }, Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "resource-explorer-2.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-east-1.api.aws", - }, endpointKey{ Region: "ap-northeast-1", }: endpoint{ @@ -21294,6 +22802,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "resource-explorer-2.eu-west-3.api.aws", }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "resource-explorer-2.il-central-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{ @@ -21419,6 +22932,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -21637,6 +23153,9 @@ var awsPartition = 
partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -21646,18 +23165,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -21667,6 +23195,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -21862,6 +23393,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -21871,18 +23405,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -21892,6 +23435,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -22263,6 +23809,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.il-central-1.amazonaws.com", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -23049,30 +24604,84 @@ var awsPartition = partition{ }, "scheduler": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: 
endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -23246,6 +24855,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -23349,6 +24961,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23358,18 +24973,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23415,6 +25039,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -23467,6 +25094,15 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, @@ -23476,12 +25112,21 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -23606,6 +25251,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23615,18 +25263,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23636,6 +25293,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -23736,6 +25399,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23745,6 +25411,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -23757,12 +25426,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23873,7 +25548,7 @@ var awsPartition = partition{ Region: "af-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.af-south-1.amazonaws.com", + Hostname: "servicediscovery.af-south-1.api.aws", }, endpointKey{ Region: "ap-east-1", @@ -23882,7 +25557,7 @@ var awsPartition = partition{ Region: "ap-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-east-1.amazonaws.com", + Hostname: "servicediscovery.ap-east-1.api.aws", }, endpointKey{ Region: "ap-northeast-1", @@ -23891,7 +25566,7 @@ var awsPartition = partition{ Region: "ap-northeast-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-northeast-1.amazonaws.com", + Hostname: "servicediscovery.ap-northeast-1.api.aws", }, endpointKey{ Region: "ap-northeast-2", @@ -23900,7 +25575,7 @@ var awsPartition = partition{ Region: "ap-northeast-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-northeast-2.amazonaws.com", + Hostname: "servicediscovery.ap-northeast-2.api.aws", }, endpointKey{ Region: "ap-northeast-3", @@ -23909,7 +25584,7 @@ var awsPartition = partition{ Region: "ap-northeast-3", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-northeast-3.amazonaws.com", + Hostname: "servicediscovery.ap-northeast-3.api.aws", }, endpointKey{ Region: "ap-south-1", @@ -23918,7 +25593,7 @@ var awsPartition = partition{ Region: "ap-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-south-1.amazonaws.com", + Hostname: "servicediscovery.ap-south-1.api.aws", }, endpointKey{ Region: "ap-south-2", @@ -23927,7 +25602,7 @@ var awsPartition = partition{ Region: "ap-south-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-south-2.amazonaws.com", + Hostname: "servicediscovery.ap-south-2.api.aws", }, endpointKey{ Region: "ap-southeast-1", @@ -23936,7 +25611,7 @@ var awsPartition = partition{ Region: "ap-southeast-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-southeast-1.amazonaws.com", + Hostname: "servicediscovery.ap-southeast-1.api.aws", }, endpointKey{ Region: "ap-southeast-2", @@ -23945,7 +25620,7 @@ var awsPartition = partition{ Region: "ap-southeast-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-southeast-2.amazonaws.com", + Hostname: "servicediscovery.ap-southeast-2.api.aws", }, endpointKey{ Region: "ap-southeast-3", @@ -23954,7 +25629,16 @@ var awsPartition = partition{ Region: "ap-southeast-3", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-southeast-3.amazonaws.com", + Hostname: "servicediscovery.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-4.api.aws", }, endpointKey{ Region: "ca-central-1", @@ -23963,7 +25647,7 @@ var awsPartition = partition{ Region: "ca-central-1", Variant: dualStackVariant, }: endpoint{ - Hostname: 
"servicediscovery.ca-central-1.amazonaws.com", + Hostname: "servicediscovery.ca-central-1.api.aws", }, endpointKey{ Region: "ca-central-1", @@ -23971,6 +25655,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1-fips", }: endpoint{ @@ -23987,7 +25677,7 @@ var awsPartition = partition{ Region: "eu-central-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-central-1.amazonaws.com", + Hostname: "servicediscovery.eu-central-1.api.aws", }, endpointKey{ Region: "eu-central-2", @@ -23996,7 +25686,7 @@ var awsPartition = partition{ Region: "eu-central-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-central-2.amazonaws.com", + Hostname: "servicediscovery.eu-central-2.api.aws", }, endpointKey{ Region: "eu-north-1", @@ -24005,7 +25695,7 @@ var awsPartition = partition{ Region: "eu-north-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-north-1.amazonaws.com", + Hostname: "servicediscovery.eu-north-1.api.aws", }, endpointKey{ Region: "eu-south-1", @@ -24014,7 +25704,7 @@ var awsPartition = partition{ Region: "eu-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-south-1.amazonaws.com", + Hostname: "servicediscovery.eu-south-1.api.aws", }, endpointKey{ Region: "eu-south-2", @@ -24023,7 +25713,7 @@ var awsPartition = partition{ Region: "eu-south-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-south-2.amazonaws.com", + Hostname: "servicediscovery.eu-south-2.api.aws", }, endpointKey{ Region: "eu-west-1", @@ -24032,7 +25722,7 @@ var awsPartition = partition{ Region: "eu-west-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-west-1.amazonaws.com", + Hostname: "servicediscovery.eu-west-1.api.aws", }, endpointKey{ Region: "eu-west-2", @@ -24041,7 +25731,7 @@ var awsPartition = partition{ Region: "eu-west-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-west-2.amazonaws.com", + Hostname: "servicediscovery.eu-west-2.api.aws", }, endpointKey{ Region: "eu-west-3", @@ -24050,7 +25740,16 @@ var awsPartition = partition{ Region: "eu-west-3", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-west-3.amazonaws.com", + Hostname: "servicediscovery.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.il-central-1.api.aws", }, endpointKey{ Region: "me-central-1", @@ -24059,7 +25758,7 @@ var awsPartition = partition{ Region: "me-central-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.me-central-1.amazonaws.com", + Hostname: "servicediscovery.me-central-1.api.aws", }, endpointKey{ Region: "me-south-1", @@ -24068,7 +25767,7 @@ var awsPartition = partition{ Region: "me-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.me-south-1.amazonaws.com", + Hostname: "servicediscovery.me-south-1.api.aws", }, endpointKey{ Region: "sa-east-1", @@ -24077,34 +25776,7 @@ var awsPartition = partition{ Region: "sa-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.sa-east-1.amazonaws.com", - }, - endpointKey{ - Region: "servicediscovery", - }: 
endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, + Hostname: "servicediscovery.sa-east-1.api.aws", }, endpointKey{ Region: "us-east-1", @@ -24113,7 +25785,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-east-1.amazonaws.com", + Hostname: "servicediscovery.us-east-1.api.aws", }, endpointKey{ Region: "us-east-1", @@ -24121,6 +25793,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1-fips", }: endpoint{ @@ -24137,7 +25815,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-east-2.amazonaws.com", + Hostname: "servicediscovery.us-east-2.api.aws", }, endpointKey{ Region: "us-east-2", @@ -24145,6 +25823,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2-fips", }: endpoint{ @@ -24161,7 +25845,7 @@ var awsPartition = partition{ Region: "us-west-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-west-1.amazonaws.com", + Hostname: "servicediscovery.us-west-1.api.aws", }, endpointKey{ Region: "us-west-1", @@ -24169,6 +25853,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1-fips", }: endpoint{ @@ -24185,7 +25875,7 @@ var awsPartition = partition{ Region: "us-west-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-west-2.amazonaws.com", + Hostname: "servicediscovery.us-west-2.api.aws", }, endpointKey{ Region: "us-west-2", @@ -24193,6 +25883,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2-fips", }: endpoint{ @@ -24229,6 +25925,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -24238,18 +25937,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + 
Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24402,35 +26110,7 @@ var awsPartition = partition{ }, }, }, - "simspaceweaver": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "sms": service{ + "signer": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -24477,7 +26157,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-1", }: endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", + Hostname: "signer-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -24486,7 +26166,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-2", }: endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", + Hostname: "signer-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, @@ -24495,7 +26175,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-1", }: endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", + Hostname: "signer-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, @@ -24504,7 +26184,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-2", }: endpoint{ - Hostname: "sms-fips.us-west-2.amazonaws.com", + Hostname: "signer-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, @@ -24523,7 +26203,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", + Hostname: "signer-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", @@ -24532,7 +26212,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", + Hostname: "signer-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-1", @@ -24541,7 +26221,57 @@ var awsPartition = partition{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", + Hostname: "signer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-west-2.amazonaws.com", + }, + }, + }, + "simspaceweaver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: 
"sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-west-2", @@ -24893,6 +26623,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -25010,7 +26743,161 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-1", }: endpoint{ - Hostname: "sns-fips.us-east-1.amazonaws.com", + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: 
"eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -25019,7 +26906,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-2", }: endpoint{ - Hostname: "sns-fips.us-east-2.amazonaws.com", + Hostname: "sqs-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, @@ -25028,7 +26915,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-1", }: endpoint{ - Hostname: "sns-fips.us-west-1.amazonaws.com", + Hostname: "sqs-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, @@ -25037,12 +26924,15 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-2", }: endpoint{ - Hostname: "sns-fips.us-west-2.amazonaws.com", + Hostname: "sqs-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -25054,12 +26944,15 @@ var awsPartition = partition{ }: endpoint{}, endpointKey{ Region: "us-east-1", - }: endpoint{}, + }: endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "sns-fips.us-east-1.amazonaws.com", + Hostname: "sqs-fips.us-east-1.amazonaws.com", + SSLCommonName: "queue.{dnsSuffix}", }, endpointKey{ Region: "us-east-2", @@ -25068,7 +26961,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ - Hostname: "sns-fips.us-east-2.amazonaws.com", + Hostname: "sqs-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-1", @@ -25077,7 +26970,7 @@ var awsPartition = partition{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "sns-fips.us-west-1.amazonaws.com", + Hostname: "sqs-fips.us-west-1.amazonaws.com", }, endpointKey{ Region: "us-west-2", @@ -25086,17 +26979,11 @@ var awsPartition = partition{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ - Hostname: "sns-fips.us-west-2.amazonaws.com", + Hostname: "sqs-fips.us-west-2.amazonaws.com", }, }, }, - "sqs": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, + "ssm": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -25134,6 +27021,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25158,10 +27051,19 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", + Hostname: "ssm-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -25170,7 +27072,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-2", }: endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", + Hostname: "ssm-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: 
"us-east-2", }, @@ -25179,7 +27081,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-1", }: endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", + Hostname: "ssm-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, @@ -25188,12 +27090,15 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-2", }: endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", + Hostname: "ssm-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -25205,15 +27110,12 @@ var awsPartition = partition{ }: endpoint{}, endpointKey{ Region: "us-east-1", - }: endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, + }: endpoint{}, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", - SSLCommonName: "queue.{dnsSuffix}", + Hostname: "ssm-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", @@ -25222,7 +27124,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", + Hostname: "ssm-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-1", @@ -25231,7 +27133,7 @@ var awsPartition = partition{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", + Hostname: "ssm-fips.us-west-1.amazonaws.com", }, endpointKey{ Region: "us-west-2", @@ -25240,69 +27142,36 @@ var awsPartition = partition{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", + Hostname: "ssm-fips.us-west-2.amazonaws.com", }, }, }, - "ssm": service{ + "ssm-contacts": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-fips.ca-central-1.amazonaws.com", - }, endpointKey{ Region: "eu-central-1", }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -25312,19 +27181,10 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ssm-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ - Hostname: "ssm-fips.us-east-1.amazonaws.com", + Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -25333,7 +27193,7 @@ var 
awsPartition = partition{ endpointKey{ Region: "fips-us-east-2", }: endpoint{ - Hostname: "ssm-fips.us-east-2.amazonaws.com", + Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, @@ -25342,7 +27202,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-1", }: endpoint{ - Hostname: "ssm-fips.us-west-1.amazonaws.com", + Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, @@ -25351,18 +27211,12 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-2", }: endpoint{ - Hostname: "ssm-fips.us-west-2.amazonaws.com", + Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -25373,7 +27227,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "ssm-fips.us-east-1.amazonaws.com", + Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", @@ -25382,7 +27236,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ - Hostname: "ssm-fips.us-east-2.amazonaws.com", + Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-1", @@ -25391,7 +27245,7 @@ var awsPartition = partition{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "ssm-fips.us-west-1.amazonaws.com", + Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com", }, endpointKey{ Region: "us-west-2", @@ -25400,7 +27254,7 @@ var awsPartition = partition{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ - Hostname: "ssm-fips.us-west-2.amazonaws.com", + Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com", }, }, }, @@ -25424,6 +27278,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25439,21 +27299,90 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", 
+ Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com", + }, }, }, "ssm-sap": service{ @@ -25488,6 +27417,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25506,6 +27441,51 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -25515,15 +27495,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", + }, }, }, "sso": service{ @@ -25694,6 +27698,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -25761,6 +27768,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -25770,6 +27780,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: 
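The ssm-incidents hunk above shows the full FIPS pattern this file uses: each region keeps its plain entry, gains a fipsVariant entry selected through a resolver option, and a "fips-<region>" pseudo-region marked Deprecated is kept so older callers that passed it as a region name still resolve. A sketch under those assumptions; the service ID and hostname come from the entries above:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Preferred form: keep the real region and opt into the FIPS variant.
	ep, err := endpoints.DefaultResolver().EndpointFor(
		"ssm-incidents", "us-east-1",
		endpoints.UseFIPSEndpointOption,
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ep.URL) // https://ssm-incidents-fips.us-east-1.amazonaws.com

	// Legacy form: the pseudo-region key still resolves, but it is flagged
	// Deprecated: boxedTrue above, so new code should use the variant.
	ep, err = endpoints.DefaultResolver().EndpointFor("ssm-incidents", "fips-us-east-1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ep.SigningRegion) // us-east-1, via the pinned CredentialScope
}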
endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -25812,15 +27825,6 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -25974,6 +27978,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "local", }: endpoint{ @@ -26077,6 +28084,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -26284,6 +28294,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -26429,6 +28442,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -26538,6 +28554,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -26679,6 +28698,25 @@ var awsPartition = partition{ }, }, }, + "tnb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "transcribe": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -26828,12 +28866,21 @@ var awsPartition = partition{ }, "transcribestreaming": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, @@ -26991,6 +29038,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -27000,6 +29050,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -27012,12 +29065,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -27072,6 +29131,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -27218,6 +29280,91 @@ var awsPartition = partition{ }, }, }, + "verifiedpermissions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "voice-chime": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27367,6 +29514,31 @@ var awsPartition = partition{ }, }, }, + "vpc-lattice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "waf": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -27530,6 +29702,23 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "waf-regional.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -27581,6 +29770,23 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -27615,6 +29821,23 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "waf-regional.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + 
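tnb, verifiedpermissions, and vpc-lattice above are brand-new service tables rather than edits to existing ones. A sketch of checking where one of them is available; "vpc-lattice" is the endpoint ID from the table, and Services/Regions are the standard v1 endpoints API:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Look the new service up in the standard partition and list the
	// regions its endpoints cover (map iteration order is random).
	if svc, ok := endpoints.AwsPartition().Services()["vpc-lattice"]; ok {
		for _, r := range svc.Regions() {
			fmt.Println(r.ID())
		}
	}
}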
Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -27649,6 +29872,23 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "waf-regional.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -27754,6 +29994,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ap-southeast-1", }: endpoint{ @@ -27781,6 +30030,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -27799,6 +30057,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-north-1", }: endpoint{ @@ -27817,6 +30084,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-west-1", }: endpoint{ @@ -27844,6 +30120,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-central-1", }: endpoint{ @@ -27907,6 +30192,23 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "waf-regional.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -28132,6 +30434,23 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "wafv2.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -28183,6 
+30502,23 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "wafv2.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -28217,6 +30553,23 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "wafv2.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -28251,6 +30604,23 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "wafv2.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -28356,6 +30726,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ap-southeast-1", }: endpoint{ @@ -28383,6 +30762,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -28401,6 +30789,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-north-1", }: endpoint{ @@ -28419,6 +30816,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-west-1", }: endpoint{ @@ -28446,6 +30852,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-central-1", }: endpoint{ @@ -28509,6 +30924,23 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "wafv2.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + 
Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -29023,6 +31455,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -29162,6 +31597,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "airflow": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "api.ecr": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -29289,6 +31734,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "athena": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -29351,6 +31806,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "backupstorage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "batch": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -29540,7 +32005,10 @@ var awscnPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", - }: endpoint{}, + }: endpoint{ + Hostname: "data.ats.iot.cn-north-1.amazonaws.com.cn", + Protocols: []string{"https"}, + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, @@ -29803,6 +32271,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "es": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -29994,13 +32472,6 @@ var awscnPartition = partition{ }, }, "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", @@ -30167,6 +32638,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "logs": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30254,6 +32735,16 @@ var awscnPartition = partition{ }, }, }, + "oam": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "organizations": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -30508,6 +32999,27 @@ var awscnPartition = partition{ }, }, }, + "savingsplans": service{ + IsRegionalized: boxedTrue, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "savingsplans.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "savingsplans.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "secretsmanager": service{ Endpoints: 
serviceEndpoints{ endpointKey{ @@ -30566,7 +33078,7 @@ var awscnPartition = partition{ Region: "cn-north-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.cn-north-1.amazonaws.com.cn", + Hostname: "servicediscovery.cn-north-1.api.amazonwebservices.com.cn", }, endpointKey{ Region: "cn-northwest-1", @@ -30575,7 +33087,7 @@ var awscnPartition = partition{ Region: "cn-northwest-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.cn-northwest-1.amazonaws.com.cn", + Hostname: "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn", }, }, }, @@ -30594,7 +33106,7 @@ var awscnPartition = partition{ }: endpoint{}, }, }, - "sms": service{ + "signer": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", @@ -30604,6 +33116,13 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, "snowball": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -31018,6 +33537,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -31026,6 +33563,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "acm": service{ @@ -31452,13 +34007,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, + }, }, }, "applicationinsights": service{ @@ -31500,6 +34087,24 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -31555,6 +34160,12 @@ var awsusgovPartition = partition{ }: endpoint{ Hostname: "athena-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -31570,6 +34181,12 @@ var awsusgovPartition = partition{ }: endpoint{ Hostname: "athena-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-west-1.api.aws", + }, }, }, "autoscaling": service{ @@ -31633,6 +34250,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "backupstorage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -31691,6 +34318,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -31699,6 +34344,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "cloudcontrolapi": service{ @@ -32022,6 +34685,15 @@ var awsusgovPartition = partition{ }, "codepipeline": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -32031,6 +34703,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -32205,9 +34886,24 @@ var awsusgovPartition = partition{ }, "connect": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "connect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: 
"us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect.us-gov-west-1.amazonaws.com", + }, }, }, "controltower": service{ @@ -32395,9 +35091,39 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dlm.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "dlm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dlm.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "dlm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "dms": service{ @@ -33695,30 +36421,19 @@ var awsusgovPartition = partition{ }, }, "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, Endpoints: serviceEndpoints{ endpointKey{ Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "iot-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "iot-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ @@ -33863,14 +36578,114 @@ var awsusgovPartition = partition{ }, }, }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "kafka": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", - }: endpoint{}, + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + 
Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", - }: endpoint{}, + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "kendra": service{ @@ -34167,6 +36982,16 @@ var awsusgovPartition = partition{ }, }, }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "logs": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34303,6 +37128,46 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "mgn": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "models.lex": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -34597,12 +37462,22 @@ var awsusgovPartition = partition{ "participant.connect": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "participant.connect.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect.us-gov-west-1.amazonaws.com", }, }, }, @@ -35053,9 +37928,35 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", + + Deprecated: boxedTrue, + }, }, }, "runtime.lex": service{ 
@@ -35596,6 +38497,12 @@ var awsusgovPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-east-1-fips", }: endpoint{ @@ -35620,6 +38527,12 @@ var awsusgovPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1-fips", }: endpoint{ @@ -35682,17 +38595,18 @@ var awsusgovPartition = partition{ }, }, }, - "sms": service{ + "simspaceweaver": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -35702,15 +38616,6 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", - }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -35816,14 +38721,14 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "sns.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, }, }, @@ -36555,6 +39460,15 @@ var awsusgovPartition = partition{ }, "workspaces": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -36564,6 +39478,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -36726,6 +39649,13 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "autoscaling": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36738,6 +39668,16 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, "cloudformation": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36807,6 +39747,16 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, 
+ endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, "dms": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -37161,6 +40111,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "logs": service{ @@ -37221,6 +40174,46 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, "rds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -37260,6 +40253,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "runtime.sagemaker": service{ @@ -37413,6 +40409,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "transcribe": service{ @@ -37882,6 +40881,28 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, "rds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -37937,6 +40958,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "snowball": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -38048,3 +41076,71 @@ var awsisobPartition = partition{ }, }, } + +// AwsIsoEPartition returns the Resolver for AWS ISOE (Europe). 
+func AwsIsoEPartition() Partition { + return awsisoePartition.Partition() +} + +var awsisoePartition = partition{ + ID: "aws-iso-e", + Name: "AWS ISOE (Europe)", + DNSSuffix: "cloud.adc-e.uk", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^eu\\-isoe\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{}, + Services: services{}, +} + +// AwsIsoFPartition returns the Resolver for AWS ISOF. +func AwsIsoFPartition() Partition { + return awsisofPartition.Partition() +} + +var awsisofPartition = partition{ + ID: "aws-iso-f", + Name: "AWS ISOF", + DNSSuffix: "csp.hci.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isof\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{}, + Services: services{}, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index 1d3f4c3a..ea8e3537 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/shareddefaults" + "github.com/aws/aws-sdk-go/service/ssooidc" "github.com/aws/aws-sdk-go/service/sts" ) @@ -23,6 +24,10 @@ type CredentialsProviderOptions struct { // WebIdentityRoleProviderOptions configures a WebIdentityRoleProvider, // such as setting its ExpiryWindow. WebIdentityRoleProviderOptions func(*stscreds.WebIdentityRoleProvider) + + // ProcessProviderOptions configures a ProcessProvider, + // such as setting its Timeout. + ProcessProviderOptions func(*processcreds.ProcessProvider) } func resolveCredentials(cfg *aws.Config, @@ -33,7 +38,7 @@ func resolveCredentials(cfg *aws.Config, switch { case len(sessOpts.Profile) != 0: - // User explicitly provided an Profile in the session's configuration + // User explicitly provided a Profile in the session's configuration // so load that profile from shared config first. // Github(aws/aws-sdk-go#2727) return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) @@ -134,7 +139,11 @@ func resolveCredsFromProfile(cfg *aws.Config, case len(sharedCfg.CredentialProcess) != 0: // Get credentials from CredentialProcess - creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + var optFns []func(*processcreds.ProcessProvider) + if sessOpts.CredentialsProviderOptions != nil && sessOpts.CredentialsProviderOptions.ProcessProviderOptions != nil { + optFns = append(optFns, sessOpts.CredentialsProviderOptions.ProcessProviderOptions) + } + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess, optFns...) 
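The new ProcessProviderOptions hook gives callers a way to tune the credential_process provider before it runs. A minimal sketch of wiring it through session.Options; the profile name is hypothetical, and Timeout is the processcreds.ProcessProvider field the option comment above refers to:

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // "proc-profile" is a hypothetical profile whose shared config sets
        // credential_process; the option raises the provider's exec timeout.
        sess := session.Must(session.NewSessionWithOptions(session.Options{
            Profile:           "proc-profile",
            SharedConfigState: session.SharedConfigEnable,
            CredentialsProviderOptions: &session.CredentialsProviderOptions{
                ProcessProviderOptions: func(p *processcreds.ProcessProvider) {
                    p.Timeout = 2 * time.Minute
                },
            },
        }))
        _ = sess
    }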
default: // Fallback to default credentials provider, include mock errors for @@ -173,8 +182,28 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req return nil, err } + var optFns []func(provider *ssocreds.Provider) cfgCopy := cfg.Copy() - cfgCopy.Region = &sharedCfg.SSORegion + + if sharedCfg.SSOSession != nil { + cfgCopy.Region = &sharedCfg.SSOSession.SSORegion + cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedCfg.SSOSession.Name) + if err != nil { + return nil, err + } + // create oidcClient with AnonymousCredentials to avoid recursively resolving credentials + mySession := Must(NewSession(&aws.Config{ + Credentials: credentials.AnonymousCredentials, + })) + oidcClient := ssooidc.New(mySession, cfgCopy) + tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath) + optFns = append(optFns, func(p *ssocreds.Provider) { + p.TokenProvider = tokenProvider + p.CachedTokenFilepath = cachedPath + }) + } else { + cfgCopy.Region = &sharedCfg.SSORegion + } return ssocreds.NewCredentials( &Session{ @@ -184,6 +213,7 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req sharedCfg.SSOAccountID, sharedCfg.SSORoleName, sharedCfg.SSOStartURL, + optFns..., ), nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 4293dbe1..8127c99a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -37,7 +37,7 @@ const ( // ErrSharedConfigSourceCollision will be returned if a section contains both // source_profile and credential_source -var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil) +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token", nil) // ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment // variables are empty and Environment was set as the credential source @@ -174,7 +174,6 @@ const ( // Options provides the means to control how a Session is created and what // configuration values will be loaded. -// type Options struct { // Provides config values for the SDK to use when creating service clients // and making API requests to services. Any value set in with this field @@ -224,7 +223,7 @@ type Options struct { // from stdin for the MFA token code. // // This field is only used if the shared configuration is enabled, and - // the config enables assume role wit MFA via the mfa_serial field. + // the config enables assume role with MFA via the mfa_serial field. AssumeRoleTokenProvider func() (string, error) // When the SDK's shared config is configured to assume a role this option @@ -322,24 +321,24 @@ type Options struct { // credentials file. Enabling the Shared Config will also allow the Session // to be built with retrieving credentials with AssumeRole set in the config. 
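With the sso-session handling added to resolveSSOCredentials above, nothing changes at the call site: loading a profile that points at an sso-session section builds the ssooidc-backed token provider automatically, while legacy sso_* profiles keep the old path. A minimal sketch with a hypothetical profile name:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // "dev-sso" is a hypothetical profile; if it carries sso_session the
        // cached-token provider above is used, otherwise the legacy sso_* keys.
        sess := session.Must(session.NewSessionWithOptions(session.Options{
            Profile:           "dev-sso",
            SharedConfigState: session.SharedConfigEnable,
        }))
        creds, err := sess.Config.Credentials.Get()
        if err != nil {
            fmt.Println("credential resolution failed:", err)
            return
        }
        fmt.Println("resolved provider:", creds.ProviderName)
    }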
// -// // Equivalent to session.New -// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) // -// // Specify profile to load for the session's config -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Profile: "profile_name", -// })) +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) // -// // Specify profile for config and region for requests -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Config: aws.Config{Region: aws.String("us-east-1")}, -// Profile: "profile_name", -// })) +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) // -// // Force enable Shared Config support -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// SharedConfigState: session.SharedConfigEnable, -// })) +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) func NewSessionWithOptions(opts Options) (*Session, error) { var envCfg envConfig var err error @@ -375,7 +374,7 @@ func NewSessionWithOptions(opts Options) (*Session, error) { // This helper is intended to be used in variable initialization to load the // Session and configuration at startup. Such as: // -// var sess = session.Must(session.NewSession()) +// var sess = session.Must(session.NewSession()) func Must(sess *Session, err error) *Session { if err != nil { panic(err) @@ -780,16 +779,6 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode) } - // Configure credentials if not already set by the user when creating the - // Session. - if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) - if err != nil { - return err - } - cfg.Credentials = creds - } - cfg.S3UseARNRegion = userCfg.S3UseARNRegion if cfg.S3UseARNRegion == nil { cfg.S3UseARNRegion = &envCfg.S3UseARNRegion @@ -812,6 +801,17 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, } } + // Configure credentials if not already set by the user when creating the Session. + // Credentials are resolved last such that all _resolved_ config values are propagated to credential providers. + // ticket: P83606045 + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } + return nil } @@ -845,8 +845,8 @@ func initHandlers(s *Session) { // and handlers. If any additional configs are provided they will be merged // on top of the Session's copied config. // -// // Create a copy of the current Session, configured for the us-west-2 region. -// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +// // Create a copy of the current Session, configured for the us-west-2 region. 
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) func (s *Session) Copy(cfgs ...*aws.Config) *Session { newSession := &Session{ Config: s.Config.Copy(cfgs...), diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 424c82b4..ea3ac0d0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -26,6 +26,13 @@ const ( roleSessionNameKey = `role_session_name` // optional roleDurationSecondsKey = "duration_seconds" // optional + // Prefix to be used for SSO sections. These are supposed to only exist in + // the shared config file, not the credentials file. + ssoSectionPrefix = `sso-session ` + + // AWS Single Sign-On (AWS SSO) group + ssoSessionNameKey = "sso_session" + // AWS Single Sign-On (AWS SSO) group ssoAccountIDKey = "sso_account_id" ssoRegionKey = "sso_region" @@ -99,6 +106,10 @@ type sharedConfig struct { CredentialProcess string WebIdentityTokenFile string + // SSO session options + SSOSessionName string + SSOSession *ssoSession + SSOAccountID string SSORegion string SSORoleName string @@ -186,6 +197,20 @@ type sharedConfigFile struct { IniData ini.Sections } +// SSOSession provides the shared configuration parameters of the sso-session +// section. +type ssoSession struct { + Name string + SSORegion string + SSOStartURL string +} + +func (s *ssoSession) setFromIniSection(section ini.Section) { + updateString(&s.Name, section, ssoSessionNameKey) + updateString(&s.SSORegion, section, ssoRegionKey) + updateString(&s.SSOStartURL, section, ssoStartURL) +} + // loadSharedConfig retrieves the configuration from the list of files using // the profile provided. The order the files are listed will determine // precedence. Values in subsequent files will overwrite values defined in @@ -266,13 +291,13 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s // profile only have credential provider options. cfg.clearAssumeRoleOptions() } else { - // First time a profile has been seen, It must either be a assume role - // credentials, or SSO. Assert if the credential type requires a role ARN, - // the ARN is also set, or validate that the SSO configuration is complete. + // First time a profile has been seen. Assert if the credential type + // requires a role ARN, the ARN is also set if err := cfg.validateCredentialsConfig(profile); err != nil { return err } } + profiles[profile] = struct{}{} if err := cfg.validateCredentialType(); err != nil { @@ -308,6 +333,30 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s cfg.SourceProfile = srcCfg } + // If the profile contains an SSO session parameter, the session MUST exist + // as a section in the config file. Load the SSO session using the name + // provided. If the session section is not found or incomplete an error + // will be returned. + if cfg.hasSSOTokenProviderConfiguration() { + skippedFiles = 0 + for _, f := range files { + section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))) + if ok { + var ssoSession ssoSession + ssoSession.setFromIniSection(section) + ssoSession.Name = cfg.SSOSessionName + cfg.SSOSession = &ssoSession + break + } + skippedFiles++ + } + if skippedFiles == len(files) { + // If all files were skipped because the sso session section is not found, return + // the sso section not found error. 
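The section lookup above expects the session to live in the shared config file under the "sso-session " prefix. A hypothetical ~/.aws/config layout that satisfies it (profile name, account, role, and start URL are all illustrative):

    [profile dev-sso]
    sso_session    = my-sso
    sso_account_id = 111122223333
    sso_role_name  = ReadOnly
    region         = us-east-1

    [sso-session my-sso]
    sso_region    = us-east-1
    sso_start_url = https://my-sso-portal.awsapps.com/start

If the profile also sets sso_region or sso_start_url directly, the validation below requires them to match the values in the sso-session section.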
+ return fmt.Errorf("failed to find SSO session section, %v", cfg.SSOSessionName) + } + } + return nil } @@ -363,6 +412,10 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e cfg.S3UsEast1RegionalEndpoint = sre } + // AWS Single Sign-On (AWS SSO) + // SSO session options + updateString(&cfg.SSOSessionName, section, ssoSessionNameKey) + // AWS Single Sign-On (AWS SSO) updateString(&cfg.SSOAccountID, section, ssoAccountIDKey) updateString(&cfg.SSORegion, section, ssoRegionKey) @@ -461,32 +514,20 @@ func (cfg *sharedConfig) validateCredentialType() error { } func (cfg *sharedConfig) validateSSOConfiguration() error { - if !cfg.hasSSOConfiguration() { + if cfg.hasSSOTokenProviderConfiguration() { + err := cfg.validateSSOTokenProviderConfiguration() + if err != nil { + return err + } return nil } - var missing []string - if len(cfg.SSOAccountID) == 0 { - missing = append(missing, ssoAccountIDKey) - } - - if len(cfg.SSORegion) == 0 { - missing = append(missing, ssoRegionKey) - } - - if len(cfg.SSORoleName) == 0 { - missing = append(missing, ssoRoleNameKey) - } - - if len(cfg.SSOStartURL) == 0 { - missing = append(missing, ssoStartURL) - } - - if len(missing) > 0 { - return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - cfg.Profile, strings.Join(missing, ", ")) + if cfg.hasLegacySSOConfiguration() { + err := cfg.validateLegacySSOConfiguration() + if err != nil { + return err + } } - return nil } @@ -525,15 +566,76 @@ func (cfg *sharedConfig) clearAssumeRoleOptions() { } func (cfg *sharedConfig) hasSSOConfiguration() bool { - switch { - case len(cfg.SSOAccountID) != 0: - case len(cfg.SSORegion) != 0: - case len(cfg.SSORoleName) != 0: - case len(cfg.SSOStartURL) != 0: - default: - return false + return cfg.hasSSOTokenProviderConfiguration() || cfg.hasLegacySSOConfiguration() +} + +func (c *sharedConfig) hasSSOTokenProviderConfiguration() bool { + return len(c.SSOSessionName) > 0 +} + +func (c *sharedConfig) hasLegacySSOConfiguration() bool { + return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0 +} + +func (c *sharedConfig) validateSSOTokenProviderConfiguration() error { + var missing []string + + if len(c.SSOSessionName) == 0 { + missing = append(missing, ssoSessionNameKey) } - return true + + if c.SSOSession == nil { + missing = append(missing, ssoSectionPrefix) + } else { + if len(c.SSOSession.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOSession.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + + if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix) + } + + if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURL, c.Profile, ssoStartURL, ssoSectionPrefix) + } + + return nil +} + +func (c *sharedConfig) validateLegacySSOConfiguration() error { + var missing []string + + if len(c.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + + if len(c.SSOAccountID) == 0 { + missing = append(missing, ssoAccountIDKey) + } + + if len(c.SSORoleName) == 
0 { + missing = append(missing, ssoRoleNameKey) + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + return nil } func oneOrNone(bs ...bool) bool { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 4d78162c..41386bab 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -3,21 +3,21 @@ // Provides request signing for request that need to be signed with // AWS V4 Signatures. // -// Standalone Signer +// # Standalone Signer // // Generally using the signer outside of the SDK should not require any additional // logic when using Go v1.5 or higher. The signer does this by taking advantage // of the URL.EscapedPath method. If your request URI requires additional escaping -// you many need to use the URL.Opaque to define what the raw URI should be sent +// you may need to use the URL.Opaque to define what the raw URI should be sent // to the service as. // // The signer will first check the URL.Opaque field, and use its value if set. // The signer does require the URL.Opaque field to be set in the form of: // -// "///" +// "///" // -// // e.g. -// "//example.com/some/path" +// // e.g. +// "//example.com/some/path" // // The leading "//" and hostname are required or the URL.Opaque escaping will // not work correctly. @@ -695,7 +695,8 @@ func (ctx *signingCtx) buildBodyDigest() error { includeSHA256Header := ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "s3-object-lambda" || - ctx.ServiceName == "glacier" + ctx.ServiceName == "glacier" || + ctx.ServiceName == "s3-outposts" s3Presign := ctx.isPresign && (ctx.ServiceName == "s3" || diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index e9ac366d..3226593c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.217" +const SDKVersion = "1.44.321" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 1d273ff0..ecc521f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -287,6 +287,10 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) if tag.Get("location") != "header" || tag.Get("enum") == "" { return "", fmt.Errorf("%T is only supported with location header and enum shapes", value) } + if len(value) == 0 { + return "", errValueNotSet + } + buff := &bytes.Buffer{} for i, sv := range value { if sv == nil || len(*sv) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go index d756d8cc..5366a646 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go @@ -2,6 +2,7 @@ package restjson import ( "bytes" + "encoding/json" "io" "io/ioutil" "net/http" @@ -40,52 +41,30 @@ func (u *UnmarshalTypedError) UnmarshalError( resp *http.Response, respMeta 
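The URL.Opaque note in the standalone-signer docs above is easiest to see in code. A minimal sketch of standalone SigV4 signing with static placeholder credentials; the host, path, service, and region are all illustrative:

    package main

    import (
        "fmt"
        "net/http"
        "strings"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
        v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
    )

    func main() {
        req, _ := http.NewRequest("POST", "https://example.com/some/path", nil)
        // Force the raw URI sent to the service, in the "///" form the
        // signer docs describe; the leading "//" and hostname are required.
        req.URL.Opaque = "//example.com/some/path"

        signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
        _, err := signer.Sign(req, strings.NewReader("{}"), "execute-api", "us-east-1", time.Now())
        if err != nil {
            fmt.Println("sign failed:", err)
            return
        }
        fmt.Println(req.Header.Get("Authorization"))
    }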
protocol.ResponseMetadata, ) (error, error) { - - code := resp.Header.Get(errorTypeHeader) - msg := resp.Header.Get(errorMessageHeader) - - body := resp.Body - if len(code) == 0 { - // If unable to get code from HTTP headers have to parse JSON message - // to determine what kind of exception this will be. - var buf bytes.Buffer - var jsonErr jsonErrorResponse - teeReader := io.TeeReader(resp.Body, &buf) - err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) - if err != nil { - return nil, err - } - - body = ioutil.NopCloser(&buf) - code = jsonErr.Code - msg = jsonErr.Message + code, msg, err := unmarshalErrorInfo(resp) + if err != nil { + return nil, err } - // If code has colon separators remove them so can compare against modeled - // exception names. - code = strings.SplitN(code, ":", 2)[0] - - if fn, ok := u.exceptions[code]; ok { - // If exception code is know, use associated constructor to get a value - // for the exception that the JSON body can be unmarshaled into. - v := fn(respMeta) - if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil { - return nil, err - } + fn, ok := u.exceptions[code] + if !ok { + return awserr.NewRequestFailure( + awserr.New(code, msg, nil), + respMeta.StatusCode, + respMeta.RequestID, + ), nil + } - if err := rest.UnmarshalResponse(resp, v, true); err != nil { - return nil, err - } + v := fn(respMeta) + if err := jsonutil.UnmarshalJSONCaseInsensitive(v, resp.Body); err != nil { + return nil, err + } - return v, nil + if err := rest.UnmarshalResponse(resp, v, true); err != nil { + return nil, err } - // fallback to unmodeled generic exceptions - return awserr.NewRequestFailure( - awserr.New(code, msg, nil), - respMeta.StatusCode, - respMeta.RequestID, - ), nil + return v, nil } // UnmarshalErrorHandler is a named request handler for unmarshaling restjson @@ -99,36 +78,80 @@ var UnmarshalErrorHandler = request.NamedHandler{ func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - var jsonErr jsonErrorResponse - err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body) + code, msg, err := unmarshalErrorInfo(r.HTTPResponse) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal response error", err), + awserr.New(request.ErrCodeSerialization, "failed to unmarshal response error", err), r.HTTPResponse.StatusCode, r.RequestID, ) return } - code := r.HTTPResponse.Header.Get(errorTypeHeader) - if code == "" { - code = jsonErr.Code - } - msg := r.HTTPResponse.Header.Get(errorMessageHeader) - if msg == "" { - msg = jsonErr.Message - } - - code = strings.SplitN(code, ":", 2)[0] r.Error = awserr.NewRequestFailure( - awserr.New(code, jsonErr.Message, nil), + awserr.New(code, msg, nil), r.HTTPResponse.StatusCode, r.RequestID, ) } type jsonErrorResponse struct { + Type string `json:"__type"` Code string `json:"code"` Message string `json:"message"` } + +func (j *jsonErrorResponse) SanitizedCode() string { + code := j.Code + if len(j.Type) > 0 { + code = j.Type + } + return sanitizeCode(code) +} + +// Remove superfluous components from a restJson error code. +// - If a : character is present, then take only the contents before the +// first : character in the value. +// - If a # character is present, then take only the contents after the first +// # character in the value. 
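Since sanitizeCode is unexported, here is a standalone sketch of the same two-step trim (colon split, then hash split) that the comment describes, runnable outside the package, using the example inputs from the doc comment:

    package main

    import (
        "fmt"
        "strings"
    )

    // sanitize mirrors restjson's sanitizeCode: keep only what precedes the
    // first ':' and what follows the first '#'.
    func sanitize(code string) string {
        noColon := strings.SplitN(code, ":", 2)[0]
        hashSplit := strings.SplitN(noColon, "#", 2)
        return hashSplit[len(hashSplit)-1]
    }

    func main() {
        for _, c := range []string{
            "FooError",
            "FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/",
            "aws.protocoltests.restjson#FooError",
            "aws.protocoltests.restjson#FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/",
        } {
            fmt.Println(sanitize(c)) // each prints FooError
        }
    }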
+// +// All of the following error values resolve to FooError: +// - FooError +// - FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +// - aws.protocoltests.restjson#FooError +// - aws.protocoltests.restjson#FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +func sanitizeCode(code string) string { + noColon := strings.SplitN(code, ":", 2)[0] + hashSplit := strings.SplitN(noColon, "#", 2) + return hashSplit[len(hashSplit)-1] +} + +// attempt to garner error details from the response, preferring header values +// when present +func unmarshalErrorInfo(resp *http.Response) (code string, msg string, err error) { + code = sanitizeCode(resp.Header.Get(errorTypeHeader)) + msg = resp.Header.Get(errorMessageHeader) + if len(code) > 0 && len(msg) > 0 { + return + } + + // a modeled error will have to be re-deserialized later, so the body must + // be preserved + var buf bytes.Buffer + tee := io.TeeReader(resp.Body, &buf) + defer func() { resp.Body = ioutil.NopCloser(&buf) }() + + var jsonErr jsonErrorResponse + if decodeErr := json.NewDecoder(tee).Decode(&jsonErr); decodeErr != nil && decodeErr != io.EOF { + err = awserr.NewUnmarshalError(decodeErr, "failed to decode response body", buf.Bytes()) + return + } + + if len(code) == 0 { + code = jsonErr.SanitizedCode() + } + if len(msg) == 0 { + msg = jsonErr.Message + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go new file mode 100644 index 00000000..c743913c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go @@ -0,0 +1,1682 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const opCreateToken = "CreateToken" + +// CreateTokenRequest generates a "aws/request.Request" representing the +// client's request for the CreateToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateToken for more information on using the CreateToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTokenRequest method. +// req, resp := client.CreateTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateTokenRequest(input *CreateTokenInput) (req *request.Request, output *CreateTokenOutput) { + op := &request.Operation{ + Name: opCreateToken, + HTTPMethod: "POST", + HTTPPath: "/token", + } + + if input == nil { + input = &CreateTokenInput{} + } + + output = &CreateTokenOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// CreateToken API operation for AWS SSO OIDC. +// +// Creates and returns an access token for the authorized client. 
The access +// token issued will be used to fetch short-term credentials for the assigned +// roles in the AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation CreateToken for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - InvalidGrantException +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - AuthorizationPendingException +// Indicates that a request to authorize a client with an access user session +// token is pending. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ExpiredTokenException +// Indicates that the token issued by the service is expired and is no longer +// valid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateToken(input *CreateTokenInput) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + return out, req.Send() +} + +// CreateTokenWithContext is the same as CreateToken with the addition of +// the ability to pass a context and additional request options. +// +// See CreateToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) CreateTokenWithContext(ctx aws.Context, input *CreateTokenInput, opts ...request.Option) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterClient = "RegisterClient" + +// RegisterClientRequest generates a "aws/request.Request" representing the +// client's request for the RegisterClient operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See RegisterClient for more information on using the RegisterClient +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the RegisterClientRequest method. +// req, resp := client.RegisterClientRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient +func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *request.Request, output *RegisterClientOutput) { + op := &request.Operation{ + Name: opRegisterClient, + HTTPMethod: "POST", + HTTPPath: "/client/register", + } + + if input == nil { + input = &RegisterClientInput{} + } + + output = &RegisterClientOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// RegisterClient API operation for AWS SSO OIDC. +// +// Registers a client with IAM Identity Center. This allows clients to initiate +// device authorization. The output should be persisted for reuse through many +// authentication requests. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation RegisterClient for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - InvalidClientMetadataException +// Indicates that the client information sent in the request during registration +// is invalid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient +func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) { + req, out := c.RegisterClientRequest(input) + return out, req.Send() +} + +// RegisterClientWithContext is the same as RegisterClient with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterClient for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) RegisterClientWithContext(ctx aws.Context, input *RegisterClientInput, opts ...request.Option) (*RegisterClientOutput, error) { + req, out := c.RegisterClientRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartDeviceAuthorization = "StartDeviceAuthorization" + +// StartDeviceAuthorizationRequest generates a "aws/request.Request" representing the +// client's request for the StartDeviceAuthorization operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartDeviceAuthorization for more information on using the StartDeviceAuthorization +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartDeviceAuthorizationRequest method. +// req, resp := client.StartDeviceAuthorizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorizationRequest(input *StartDeviceAuthorizationInput) (req *request.Request, output *StartDeviceAuthorizationOutput) { + op := &request.Operation{ + Name: opStartDeviceAuthorization, + HTTPMethod: "POST", + HTTPPath: "/device_authorization", + } + + if input == nil { + input = &StartDeviceAuthorizationInput{} + } + + output = &StartDeviceAuthorizationOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// StartDeviceAuthorization API operation for AWS SSO OIDC. +// +// Initiates device authorization by requesting a pair of verification codes +// from the authorization service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation StartDeviceAuthorization for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorization(input *StartDeviceAuthorizationInput) (*StartDeviceAuthorizationOutput, error) { + req, out := c.StartDeviceAuthorizationRequest(input) + return out, req.Send() +} + +// StartDeviceAuthorizationWithContext is the same as StartDeviceAuthorization with the addition of +// the ability to pass a context and additional request options. +// +// See StartDeviceAuthorization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
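Taken together, RegisterClient, StartDeviceAuthorization, and CreateToken form the OAuth device authorization grant. A minimal sketch of that flow against this generated client; the region and start URL are placeholders, StartUrl and VerificationUriComplete are assumed field names not shown in this hunk, the polling interval is fixed for brevity (a real client should honor the interval the service returns), and persistence of the registered client is omitted:

    package main

    import (
        "errors"
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ssooidc"
    )

    func main() {
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
        client := ssooidc.New(sess)

        // 1. Register this tool as a public client; the docs above say the
        // output should be persisted for reuse across authentications.
        reg, err := client.RegisterClient(&ssooidc.RegisterClientInput{
            ClientName: aws.String("example-cli"),
            ClientType: aws.String("public"),
        })
        if err != nil {
            panic(err)
        }

        // 2. Start device authorization; the user confirms in a browser.
        auth, err := client.StartDeviceAuthorization(&ssooidc.StartDeviceAuthorizationInput{
            ClientId:     reg.ClientId,
            ClientSecret: reg.ClientSecret,
            StartUrl:     aws.String("https://my-sso-portal.awsapps.com/start"), // placeholder
        })
        if err != nil {
            panic(err)
        }
        fmt.Println("visit:", aws.StringValue(auth.VerificationUriComplete))

        // 3. Poll CreateToken with the device-code grant until authorized.
        for {
            tok, err := client.CreateToken(&ssooidc.CreateTokenInput{
                ClientId:     reg.ClientId,
                ClientSecret: reg.ClientSecret,
                DeviceCode:   auth.DeviceCode,
                GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
            })
            var pending *ssooidc.AuthorizationPendingException
            if errors.As(err, &pending) {
                time.Sleep(5 * time.Second) // fixed interval for brevity
                continue
            }
            if err != nil {
                panic(err)
            }
            fmt.Println("token type:", aws.StringValue(tok.TokenType))
            return
        }
    }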
+func (c *SSOOIDC) StartDeviceAuthorizationWithContext(ctx aws.Context, input *StartDeviceAuthorizationInput, opts ...request.Option) (*StartDeviceAuthorizationOutput, error) { + req, out := c.StartDeviceAuthorizationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// You do not have sufficient access to perform this action. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a request to authorize a client with an access user session +// token is pending. +type AuthorizationPendingException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AuthorizationPendingException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s AuthorizationPendingException) GoString() string { + return s.String() +} + +func newErrorAuthorizationPendingException(v protocol.ResponseMetadata) error { + return &AuthorizationPendingException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AuthorizationPendingException) Code() string { + return "AuthorizationPendingException" +} + +// Message returns the exception's message. +func (s *AuthorizationPendingException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AuthorizationPendingException) OrigErr() error { + return nil +} + +func (s *AuthorizationPendingException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AuthorizationPendingException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AuthorizationPendingException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateTokenInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for each client. This value should come from + // the persisted result of the RegisterClient API. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // A secret string generated for the client. This value should come from the + // persisted result of the RegisterClient API. + // + // ClientSecret is a required field + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"` + + // The authorization code received from the authorization service. This parameter + // is required to perform an authorization grant request to get access to a + // token. + Code *string `locationName:"code" type:"string"` + + // Used only when calling this API for the device code grant type. This short-term + // code is used to identify this authentication attempt. This should come from + // an in-memory reference to the result of the StartDeviceAuthorization API. + DeviceCode *string `locationName:"deviceCode" type:"string"` + + // Supports grant types for the authorization code, refresh token, and device + // code request. For device code requests, specify the following value: + // + // urn:ietf:params:oauth:grant-type:device_code + // + // For information about how to obtain the device code, see the StartDeviceAuthorization + // topic. + // + // GrantType is a required field + GrantType *string `locationName:"grantType" type:"string" required:"true"` + + // The location of the application that will receive the authorization code. + // Users authorize the service to send the request to this location. + RedirectUri *string `locationName:"redirectUri" type:"string"` + + // Currently, refreshToken is not yet implemented and is not supported. For + // more information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the + // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // The token used to obtain an access token in the event that the access token + // is invalid or expired. + RefreshToken *string `locationName:"refreshToken" type:"string"` + + // The list of scopes that is defined by the client. 
Upon authorization, this + // list is used to restrict permissions when granting an access token. + Scope []*string `locationName:"scope" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTokenInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.GrantType == nil { + invalidParams.Add(request.NewErrParamRequired("GrantType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *CreateTokenInput) SetClientId(v string) *CreateTokenInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *CreateTokenInput) SetClientSecret(v string) *CreateTokenInput { + s.ClientSecret = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput { + s.Code = &v + return s +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput { + s.DeviceCode = &v + return s +} + +// SetGrantType sets the GrantType field's value. +func (s *CreateTokenInput) SetGrantType(v string) *CreateTokenInput { + s.GrantType = &v + return s +} + +// SetRedirectUri sets the RedirectUri field's value. +func (s *CreateTokenInput) SetRedirectUri(v string) *CreateTokenInput { + s.RedirectUri = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenInput) SetRefreshToken(v string) *CreateTokenInput { + s.RefreshToken = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { + s.Scope = v + return s +} + +type CreateTokenOutput struct { + _ struct{} `type:"structure"` + + // An opaque token to access IAM Identity Center resources assigned to a user. + AccessToken *string `locationName:"accessToken" type:"string"` + + // Indicates the time in seconds when an access token will expire. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // Currently, idToken is not yet implemented and is not supported. For more + // information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the + // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // The identifier of the user that associated with the access token, if present. + IdToken *string `locationName:"idToken" type:"string"` + + // Currently, refreshToken is not yet implemented and is not supported. 
For + // more information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the + // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + RefreshToken *string `locationName:"refreshToken" type:"string"` + + // Used to notify the client that the returned token is an access token. The + // supported type is BearerToken. + TokenType *string `locationName:"tokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateTokenOutput) SetAccessToken(v string) *CreateTokenOutput { + s.AccessToken = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *CreateTokenOutput) SetExpiresIn(v int64) *CreateTokenOutput { + s.ExpiresIn = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *CreateTokenOutput) SetIdToken(v string) *CreateTokenOutput { + s.IdToken = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenOutput) SetRefreshToken(v string) *CreateTokenOutput { + s.RefreshToken = &v + return s +} + +// SetTokenType sets the TokenType field's value. +func (s *CreateTokenOutput) SetTokenType(v string) *CreateTokenOutput { + s.TokenType = &v + return s +} + +// Indicates that the token issued by the service is expired and is no longer +// valid. +type ExpiredTokenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExpiredTokenException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExpiredTokenException) GoString() string { + return s.String() +} + +func newErrorExpiredTokenException(v protocol.ResponseMetadata) error { + return &ExpiredTokenException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ExpiredTokenException) Code() string { + return "ExpiredTokenException" +} + +// Message returns the exception's message. 
+func (s *ExpiredTokenException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExpiredTokenException) OrigErr() error { + return nil +} + +func (s *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ExpiredTokenException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ExpiredTokenException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that an error from the service occurred while trying to process +// a request. +type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) GoString() string { + return s.String() +} + +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" +} + +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerException) OrigErr() error { + return nil +} + +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +type InvalidClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientException) GoString() string { + return s.String() +} + +func newErrorInvalidClientException(v protocol.ResponseMetadata) error { + return &InvalidClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidClientException) Code() string { + return "InvalidClientException" +} + +// Message returns the exception's message. +func (s *InvalidClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidClientException) OrigErr() error { + return nil +} + +func (s *InvalidClientException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the client information sent in the request during registration +// is invalid. +type InvalidClientMetadataException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientMetadataException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientMetadataException) GoString() string { + return s.String() +} + +func newErrorInvalidClientMetadataException(v protocol.ResponseMetadata) error { + return &InvalidClientMetadataException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidClientMetadataException) Code() string { + return "InvalidClientMetadataException" +} + +// Message returns the exception's message. +func (s *InvalidClientMetadataException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidClientMetadataException) OrigErr() error { + return nil +} + +func (s *InvalidClientMetadataException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *InvalidClientMetadataException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidClientMetadataException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +type InvalidGrantException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidGrantException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidGrantException) GoString() string { + return s.String() +} + +func newErrorInvalidGrantException(v protocol.ResponseMetadata) error { + return &InvalidGrantException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidGrantException) Code() string { + return "InvalidGrantException" +} + +// Message returns the exception's message. +func (s *InvalidGrantException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidGrantException) OrigErr() error { + return nil +} + +func (s *InvalidGrantException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidGrantException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidGrantException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the scope provided in the request is invalid. +type InvalidScopeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidScopeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidScopeException) GoString() string { + return s.String() +} + +func newErrorInvalidScopeException(v protocol.ResponseMetadata) error { + return &InvalidScopeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidScopeException) Code() string { + return "InvalidScopeException" +} + +// Message returns the exception's message. +func (s *InvalidScopeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidScopeException) OrigErr() error { + return nil +} + +func (s *InvalidScopeException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidScopeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidScopeException) RequestID() string { + return s.RespMetadata.RequestID +} + +type RegisterClientInput struct { + _ struct{} `type:"structure"` + + // The friendly name of the client. + // + // ClientName is a required field + ClientName *string `locationName:"clientName" type:"string" required:"true"` + + // The type of client. The service supports only public as a client type. 
Anything + // other than public will be rejected by the service. + // + // ClientType is a required field + ClientType *string `locationName:"clientType" type:"string" required:"true"` + + // The list of scopes that are defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. + Scopes []*string `locationName:"scopes" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterClientInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterClientInput"} + if s.ClientName == nil { + invalidParams.Add(request.NewErrParamRequired("ClientName")) + } + if s.ClientType == nil { + invalidParams.Add(request.NewErrParamRequired("ClientType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientName sets the ClientName field's value. +func (s *RegisterClientInput) SetClientName(v string) *RegisterClientInput { + s.ClientName = &v + return s +} + +// SetClientType sets the ClientType field's value. +func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput { + s.ClientType = &v + return s +} + +// SetScopes sets the Scopes field's value. +func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { + s.Scopes = v + return s +} + +type RegisterClientOutput struct { + _ struct{} `type:"structure"` + + // The endpoint where the client can request authorization. + AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"` + + // The unique identifier string for each client. This client uses this identifier + // to get authenticated by the service in subsequent calls. + ClientId *string `locationName:"clientId" type:"string"` + + // Indicates the time at which the clientId and clientSecret were issued. + ClientIdIssuedAt *int64 `locationName:"clientIdIssuedAt" type:"long"` + + // A secret string generated for the client. The client will use this string + // to get authenticated by the service in subsequent calls. + ClientSecret *string `locationName:"clientSecret" type:"string"` + + // Indicates the time at which the clientId and clientSecret will become invalid. + ClientSecretExpiresAt *int64 `locationName:"clientSecretExpiresAt" type:"long"` + + // The endpoint where the client can get an access token. + TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientOutput) GoString() string { + return s.String() +} + +// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value. +func (s *RegisterClientOutput) SetAuthorizationEndpoint(v string) *RegisterClientOutput { + s.AuthorizationEndpoint = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *RegisterClientOutput) SetClientId(v string) *RegisterClientOutput { + s.ClientId = &v + return s +} + +// SetClientIdIssuedAt sets the ClientIdIssuedAt field's value. +func (s *RegisterClientOutput) SetClientIdIssuedAt(v int64) *RegisterClientOutput { + s.ClientIdIssuedAt = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *RegisterClientOutput) SetClientSecret(v string) *RegisterClientOutput { + s.ClientSecret = &v + return s +} + +// SetClientSecretExpiresAt sets the ClientSecretExpiresAt field's value. +func (s *RegisterClientOutput) SetClientSecretExpiresAt(v int64) *RegisterClientOutput { + s.ClientSecretExpiresAt = &v + return s +} + +// SetTokenEndpoint sets the TokenEndpoint field's value. +func (s *RegisterClientOutput) SetTokenEndpoint(v string) *RegisterClientOutput { + s.TokenEndpoint = &v + return s +} + +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +type SlowDownException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SlowDownException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SlowDownException) GoString() string { + return s.String() +} + +func newErrorSlowDownException(v protocol.ResponseMetadata) error { + return &SlowDownException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *SlowDownException) Code() string { + return "SlowDownException" +} + +// Message returns the exception's message. +func (s *SlowDownException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *SlowDownException) OrigErr() error { + return nil +} + +func (s *SlowDownException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *SlowDownException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *SlowDownException) RequestID() string { + return s.RespMetadata.RequestID +} + +type StartDeviceAuthorizationInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for the client that is registered with IAM Identity + // Center. This value should come from the persisted result of the RegisterClient + // API operation. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // A secret string that is generated for the client. This value should come + // from the persisted result of the RegisterClient API operation. + // + // ClientSecret is a required field + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"` + + // The URL for the AWS access portal. For more information, see Using the AWS + // access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // in the IAM Identity Center User Guide. + // + // StartUrl is a required field + StartUrl *string `locationName:"startUrl" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartDeviceAuthorizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartDeviceAuthorizationInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.StartUrl == nil { + invalidParams.Add(request.NewErrParamRequired("StartUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *StartDeviceAuthorizationInput) SetClientId(v string) *StartDeviceAuthorizationInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *StartDeviceAuthorizationInput) SetClientSecret(v string) *StartDeviceAuthorizationInput { + s.ClientSecret = &v + return s +} + +// SetStartUrl sets the StartUrl field's value. +func (s *StartDeviceAuthorizationInput) SetStartUrl(v string) *StartDeviceAuthorizationInput { + s.StartUrl = &v + return s +} + +type StartDeviceAuthorizationOutput struct { + _ struct{} `type:"structure"` + + // The short-lived code that is used by the device when polling for a session + // token. + DeviceCode *string `locationName:"deviceCode" type:"string"` + + // Indicates the number of seconds in which the verification code will become + // invalid. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // Indicates the number of seconds the client must wait between attempts when + // polling for a session. + Interval *int64 `locationName:"interval" type:"integer"` + + // A one-time user verification code. 
This is needed to authorize an in-use + // device. + UserCode *string `locationName:"userCode" type:"string"` + + // The URI of the verification page that takes the userCode to authorize the + // device. + VerificationUri *string `locationName:"verificationUri" type:"string"` + + // An alternate URL that the client can use to automatically launch a browser. + // This process skips the manual step in which the user visits the verification + // page and enters their code. + VerificationUriComplete *string `locationName:"verificationUriComplete" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) GoString() string { + return s.String() +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *StartDeviceAuthorizationOutput) SetDeviceCode(v string) *StartDeviceAuthorizationOutput { + s.DeviceCode = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *StartDeviceAuthorizationOutput) SetExpiresIn(v int64) *StartDeviceAuthorizationOutput { + s.ExpiresIn = &v + return s +} + +// SetInterval sets the Interval field's value. +func (s *StartDeviceAuthorizationOutput) SetInterval(v int64) *StartDeviceAuthorizationOutput { + s.Interval = &v + return s +} + +// SetUserCode sets the UserCode field's value. +func (s *StartDeviceAuthorizationOutput) SetUserCode(v string) *StartDeviceAuthorizationOutput { + s.UserCode = &v + return s +} + +// SetVerificationUri sets the VerificationUri field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUri(v string) *StartDeviceAuthorizationOutput { + s.VerificationUri = &v + return s +} + +// SetVerificationUriComplete sets the VerificationUriComplete field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUriComplete(v string) *StartDeviceAuthorizationOutput { + s.VerificationUriComplete = &v + return s +} + +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +type UnauthorizedClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UnauthorizedClientException) GoString() string { + return s.String() +} + +func newErrorUnauthorizedClientException(v protocol.ResponseMetadata) error { + return &UnauthorizedClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnauthorizedClientException) Code() string { + return "UnauthorizedClientException" +} + +// Message returns the exception's message. +func (s *UnauthorizedClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnauthorizedClientException) OrigErr() error { + return nil +} + +func (s *UnauthorizedClientException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnauthorizedClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnauthorizedClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the grant type in the request is not supported by the service. +type UnsupportedGrantTypeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnsupportedGrantTypeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnsupportedGrantTypeException) GoString() string { + return s.String() +} + +func newErrorUnsupportedGrantTypeException(v protocol.ResponseMetadata) error { + return &UnsupportedGrantTypeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedGrantTypeException) Code() string { + return "UnsupportedGrantTypeException" +} + +// Message returns the exception's message. +func (s *UnsupportedGrantTypeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedGrantTypeException) OrigErr() error { + return nil +} + +func (s *UnsupportedGrantTypeException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedGrantTypeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *UnsupportedGrantTypeException) RequestID() string { + return s.RespMetadata.RequestID +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go new file mode 100644 index 00000000..8b5ee601 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go @@ -0,0 +1,66 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package ssooidc provides the client and types for making API +// requests to AWS SSO OIDC. +// +// AWS IAM Identity Center (successor to AWS Single Sign-On) OpenID Connect +// (OIDC) is a web service that enables a client (such as AWS CLI or a native +// application) to register with IAM Identity Center. The service also enables +// the client to fetch the user’s access token upon successful authentication +// and authorization with IAM Identity Center. +// +// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces +// will continue to retain their original name for backward compatibility purposes. +// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed). +// +// # Considerations for Using This Guide +// +// Before you begin using this guide, we recommend that you first review the +// following important information about how the IAM Identity Center OIDC service +// works. +// +// - The IAM Identity Center OIDC service currently implements only the portions +// of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628 +// (https://tools.ietf.org/html/rfc8628)) that are necessary to enable single +// sign-on authentication with the AWS CLI. Support for other OIDC flows +// frequently needed for native applications, such as Authorization Code +// Flow (+ PKCE), will be addressed in future releases. +// +// - The service emits only OIDC access tokens, such that obtaining a new +// token (For example, token refresh) requires explicit user re-authentication. +// +// - The access tokens provided by this service grant access to all AWS account +// entitlements assigned to an IAM Identity Center user, not just a particular +// application. +// +// - The documentation in this guide does not describe the mechanism to convert +// the access token into AWS Auth (“sigv4”) credentials for use with +// IAM-protected AWS service endpoints. For more information, see GetRoleCredentials +// (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) +// in the IAM Identity Center Portal API Reference Guide. +// +// For general information about IAM Identity Center, see What is IAM Identity +// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) +// in the IAM Identity Center User Guide. +// +// See https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10 for more information on this service. +// +// See ssooidc package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/ +// +// # Using the Client +// +// To contact AWS SSO OIDC with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. 
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS SSO OIDC client SSOOIDC for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/#New +package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go new file mode 100644 index 00000000..69837701 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go @@ -0,0 +1,107 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // You do not have sufficient access to perform this action. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeAuthorizationPendingException for service response error code + // "AuthorizationPendingException". + // + // Indicates that a request to authorize a client with an access user session + // token is pending. + ErrCodeAuthorizationPendingException = "AuthorizationPendingException" + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // Indicates that the token issued by the service is expired and is no longer + // valid. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeInternalServerException for service response error code + // "InternalServerException". + // + // Indicates that an error from the service occurred while trying to process + // a request. + ErrCodeInternalServerException = "InternalServerException" + + // ErrCodeInvalidClientException for service response error code + // "InvalidClientException". + // + // Indicates that the clientId or clientSecret in the request is invalid. For + // example, this can occur when a client sends an incorrect clientId or an expired + // clientSecret. + ErrCodeInvalidClientException = "InvalidClientException" + + // ErrCodeInvalidClientMetadataException for service response error code + // "InvalidClientMetadataException". + // + // Indicates that the client information sent in the request during registration + // is invalid. + ErrCodeInvalidClientMetadataException = "InvalidClientMetadataException" + + // ErrCodeInvalidGrantException for service response error code + // "InvalidGrantException". + // + // Indicates that a request contains an invalid grant. This can occur if a client + // makes a CreateToken request with an invalid grant type. + ErrCodeInvalidGrantException = "InvalidGrantException" + + // ErrCodeInvalidRequestException for service response error code + // "InvalidRequestException". + // + // Indicates that something is wrong with the input to the request. For example, + // a required parameter might be missing or out of range. + ErrCodeInvalidRequestException = "InvalidRequestException" + + // ErrCodeInvalidScopeException for service response error code + // "InvalidScopeException". + // + // Indicates that the scope provided in the request is invalid. + ErrCodeInvalidScopeException = "InvalidScopeException" + + // ErrCodeSlowDownException for service response error code + // "SlowDownException". + // + // Indicates that the client is making the request too frequently and is more + // than the service can handle. 
+ ErrCodeSlowDownException = "SlowDownException" + + // ErrCodeUnauthorizedClientException for service response error code + // "UnauthorizedClientException". + // + // Indicates that the client is not currently authorized to make the request. + // This can happen when a clientId is not issued for a public client. + ErrCodeUnauthorizedClientException = "UnauthorizedClientException" + + // ErrCodeUnsupportedGrantTypeException for service response error code + // "UnsupportedGrantTypeException". + // + // Indicates that the grant type in the request is not supported by the service. + ErrCodeUnsupportedGrantTypeException = "UnsupportedGrantTypeException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "AccessDeniedException": newErrorAccessDeniedException, + "AuthorizationPendingException": newErrorAuthorizationPendingException, + "ExpiredTokenException": newErrorExpiredTokenException, + "InternalServerException": newErrorInternalServerException, + "InvalidClientException": newErrorInvalidClientException, + "InvalidClientMetadataException": newErrorInvalidClientMetadataException, + "InvalidGrantException": newErrorInvalidGrantException, + "InvalidRequestException": newErrorInvalidRequestException, + "InvalidScopeException": newErrorInvalidScopeException, + "SlowDownException": newErrorSlowDownException, + "UnauthorizedClientException": newErrorUnauthorizedClientException, + "UnsupportedGrantTypeException": newErrorUnsupportedGrantTypeException, +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go new file mode 100644 index 00000000..969f33c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go @@ -0,0 +1,106 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +// SSOOIDC provides the API operation methods for making requests to +// AWS SSO OIDC. See this package's package overview docs +// for details on the service. +// +// SSOOIDC methods are safe to use concurrently. It is not safe to +// mutate any of the struct's properties though. +type SSOOIDC struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "SSO OIDC" // Name of service. + EndpointsID = "oidc" // ID to lookup a service endpoint with. + ServiceID = "SSO OIDC" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the SSOOIDC client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// +// mySession := session.Must(session.NewSession()) +// +// // Create a SSOOIDC client from just a session.
+// svc := ssooidc.New(mySession) +// +// // Create a SSOOIDC client with additional configuration +// svc := ssooidc.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSOOIDC { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "awsssooidc" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSOOIDC { + svc := &SSOOIDC{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2019-06-10", + ResolvedRegion: resolvedRegion, + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SSOOIDC operation and runs any +// custom request initialization. +func (c *SSOOIDC) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 63729d0a..11af63b4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -85,9 +85,9 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// When you create a role, you create two policies: A role trust policy that -// specifies who can assume the role and a permissions policy that specifies -// what can be done with the role. You specify the trusted principal who is +// When you create a role, you create two policies: a role trust policy that +// specifies who can assume the role, and a permissions policy that specifies +// what can be done with the role. You specify the trusted principal that is // allowed to assume the role in the role trust policy. // // To assume a role from a different account, your Amazon Web Services account @@ -96,9 +96,9 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // are allowed to delegate that access to users in the account. // // A user who wants to access a role in a different account must also have permissions -// that are delegated from the user account administrator. The administrator -// must attach a policy that allows the user to call AssumeRole for the ARN -// of the role in the other account. 
+// that are delegated from the account administrator. The administrator must +// attach a policy that allows the user to call AssumeRole for the ARN of the +// role in the other account. // // To allow a user to assume a role in the same account, you can do either of // the following: @@ -517,10 +517,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // a user. You can also supply the user with a consistent identity throughout // the lifetime of an application. // -// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito -// Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) -// in the Amazon Web Services SDK for iOS Developer Guide. +// To learn more about Amazon Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) +// in Amazon Cognito Developer Guide. // // Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web // Services security credentials. Therefore, you can distribute an application @@ -984,11 +982,11 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // call the operation. // // No permissions are required to perform this operation. If an administrator -// adds a policy to your IAM user or role that explicitly denies access to the -// sts:GetCallerIdentity action, you can still perform this operation. Permissions -// are not required because the same information is returned when an IAM user -// or role is denied access. To view an example response, see I Am Not Authorized -// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// attaches a policy to your identity that explicitly denies access to the sts:GetCallerIdentity +// action, you can still perform this operation. Permissions are not required +// because the same information is returned when access is denied. To view an +// example response, see I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1063,18 +1061,26 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // GetFederationToken API operation for AWS Security Token Service. // // Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) for a federated user. -// A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. You must -// call the GetFederationToken operation using the long-term security credentials -// of an IAM user. As a result, this call is appropriate in contexts where those -// credentials can be safely stored, usually in a server-based application. +// key ID, a secret access key, and a security token) for a user. A typical +// use is in a proxy application that gets temporary security credentials on +// behalf of distributed applications inside a corporate network. 
+// +// You must call the GetFederationToken operation using the long-term security +// credentials of an IAM user. As a result, this call is appropriate in contexts +// where those credentials can be safeguarded, usually in a server-based application. // For a comparison of GetFederationToken with the other API operations that // produce temporary credentials, see Requesting Temporary Security Credentials // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // +// Although it is possible to call GetFederationToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user that +// you create for the purpose of a proxy application, we do not recommend it. +// For more information, see Safeguard your root user credentials and don't +// use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide. +// // You can create a mobile-based or browser-based app that can authenticate // users using a web identity provider like Login with Amazon, Facebook, Google, // or an OpenID Connect-compatible identity provider. In this case, we recommend @@ -1083,21 +1089,13 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) // in the IAM User Guide. // -// You can also call GetFederationToken using the security credentials of an -// Amazon Web Services account root user, but we do not recommend it. Instead, -// we recommend that you create an IAM user for the purpose of the proxy application. -// Then attach a policy to the IAM user that limits federated users to only -// the actions and resources that they need to access. For more information, -// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. -// // # Session duration // // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default // session duration is 43,200 seconds (12 hours). Temporary credentials obtained -// by using the Amazon Web Services account root user credentials have a maximum -// duration of 3,600 seconds (1 hour). +// by using the root user credentials have a maximum duration of 3,600 seconds +// (1 hour). // // # Permissions // @@ -1267,12 +1265,13 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // or IAM user. The credentials consist of an access key ID, a secret access // key, and a security token. Typically, you use GetSessionToken if you want // to use MFA to protect programmatic calls to specific Amazon Web Services -// API operations like Amazon EC2 StopInstances. MFA-enabled IAM users would -// need to call GetSessionToken and submit an MFA code that is associated with -// their MFA device. Using the temporary security credentials that are returned -// from the call, IAM users can then make programmatic calls to API operations -// that require MFA authentication. If you do not supply a correct MFA code, -// then the API returns an access denied error. For a comparison of GetSessionToken +// API operations like Amazon EC2 StopInstances. 
+// +// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that the call returns, IAM users can then make programmatic calls to API +// operations that require MFA authentication. An incorrect MFA code causes +// the API to return an access denied error. For a comparison of GetSessionToken // with the other API operations that produce temporary credentials, see Requesting // Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) @@ -1287,13 +1286,12 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // # Session Duration // // The GetSessionToken operation must be called by using the long-term Amazon -// Web Services security credentials of the Amazon Web Services account root -// user or an IAM user. Credentials that are created by IAM users are valid -// for the duration that you specify. This duration can range from 900 seconds -// (15 minutes) up to a maximum of 129,600 seconds (36 hours), with a default -// of 43,200 seconds (12 hours). Credentials based on account credentials can -// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a -// default of 1 hour. +// Web Services security credentials of an IAM user. Credentials that are created +// by IAM users are valid for the duration that you specify. This duration can +// range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 +// hours), with a default of 43,200 seconds (12 hours). Credentials based on +// account credentials can range from 900 seconds (15 minutes) up to 3,600 seconds +// (1 hour), with a default of 1 hour. // // # Permissions // @@ -1305,20 +1303,20 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // // - You cannot call any STS API except AssumeRole or GetCallerIdentity. // -// We recommend that you do not call GetSessionToken with Amazon Web Services -// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) -// by creating one or more IAM users, giving them the necessary permissions, -// and using IAM users for everyday interaction with Amazon Web Services. +// The credentials that GetSessionToken returns are based on permissions associated +// with the IAM user whose credentials were used to call the operation. The +// temporary credentials have the same permissions as the IAM user. // -// The credentials that are returned by GetSessionToken are based on permissions -// associated with the user whose credentials were used to call the operation. -// If GetSessionToken is called using Amazon Web Services account root user -// credentials, the temporary credentials have root user permissions. Similarly, -// if GetSessionToken is called using the credentials of an IAM user, the temporary -// credentials have the same permissions as the IAM user. +// Although it is possible to call GetSessionToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user, we do +// not recommend it. If GetSessionToken is called using root user credentials, +// the temporary credentials have root user permissions. 
For more information, +// see Safeguard your root user credentials and don't use them for everyday +// tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide // // For more information about using GetSessionToken to create temporary credentials, -// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// see Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1462,6 +1460,9 @@ type AssumeRoleInput struct { // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` + // Reserved for future use. + ProvidedContexts []*ProvidedContext `type:"list"` + // The Amazon Resource Name (ARN) of the role to assume. // // RoleArn is a required field @@ -1635,6 +1636,16 @@ func (s *AssumeRoleInput) Validate() error { } } } + if s.ProvidedContexts != nil { + for i, v := range s.ProvidedContexts { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProvidedContexts", i), err.(request.ErrInvalidParams)) + } + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -1676,6 +1687,12 @@ func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleIn return s } +// SetProvidedContexts sets the ProvidedContexts field's value. +func (s *AssumeRoleInput) SetProvidedContexts(v []*ProvidedContext) *AssumeRoleInput { + s.ProvidedContexts = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { s.RoleArn = &v @@ -1900,8 +1917,12 @@ type AssumeRoleWithSAMLInput struct { // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) // in the IAM User Guide. // + // SAMLAssertion is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithSAMLInput's + // String and GoString methods. + // // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true"` + SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -2036,7 +2057,7 @@ type AssumeRoleWithSAMLOutput struct { // IAM. // // The combination of NameQualifier and Subject can be used to uniquely identify - // a federated user. + // a user. // // The following pseudocode shows how the hash value is calculated: // @@ -2264,10 +2285,15 @@ type AssumeRoleWithWebIdentityInput struct { // The OAuth 2.0 access token or OpenID Connect ID token that is provided by // the identity provider. Your application must get this token by authenticating // the user who is using your application with a web identity provider before - // the application makes an AssumeRoleWithWebIdentity call. + // the application makes an AssumeRoleWithWebIdentity call. Only tokens with + // RSA algorithms (RS256) are supported. + // + // WebIdentityToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's + // String and GoString methods. 
// // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true"` + WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -2573,8 +2599,12 @@ type Credentials struct { // The secret access key that can be used to sign requests. // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by Credentials's + // String and GoString methods. + // // SecretAccessKey is a required field - SecretAccessKey *string `type:"string" required:"true"` + SecretAccessKey *string `type:"string" required:"true" sensitive:"true"` // The token that users must pass to the service API to use the temporary credentials. // @@ -2922,10 +2952,9 @@ type GetFederationTokenInput struct { // The duration, in seconds, that the session should last. Acceptable durations // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using Amazon Web Services account root user credentials are restricted to - // a maximum of 3,600 seconds (one hour). If the specified duration is longer - // than one hour, the session obtained by using root user credentials defaults - // to one hour. + // using root user credentials are restricted to a maximum of 3,600 seconds + // (one hour). If the specified duration is longer than one hour, the session + // obtained by using root user credentials defaults to one hour. DurationSeconds *int64 `min:"900" type:"integer"` // The name of the federated user. The name is used as an identifier for the @@ -3376,6 +3405,63 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { return s } +// Reserved for future use. +type ProvidedContext struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + ContextAssertion *string `min:"4" type:"string"` + + // Reserved for future use. + ProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvidedContext) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvidedContext"} + if s.ContextAssertion != nil && len(*s.ContextAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ContextAssertion", 4)) + } + if s.ProviderArn != nil && len(*s.ProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContextAssertion sets the ContextAssertion field's value. +func (s *ProvidedContext) SetContextAssertion(v string) *ProvidedContext { + s.ContextAssertion = &v + return s +} + +// SetProviderArn sets the ProviderArn field's value. 
+func (s *ProvidedContext) SetProviderArn(v string) *ProvidedContext { + s.ProviderArn = &v + return s +} + // You can pass custom key-value pair attributes when you assume a role or federate // a user. These are called session tags. You can then use the session tags // to control access to resources. For more information, see Tagging Amazon diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index c40f5a2a..ea1d9eb0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -4,10 +4,9 @@ // requests to AWS Security Token Service. // // Security Token Service (STS) enables you to request temporary, limited-privilege -// credentials for Identity and Access Management (IAM) users or for users that -// you authenticate (federated users). This guide provides descriptions of the -// STS API. For more information about using this service, see Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// credentials for users. This guide provides descriptions of the STS API. For +// more information about using this service, see Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). // // See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. // diff --git a/vendor/github.com/buger/jsonparser/.gitignore b/vendor/github.com/buger/jsonparser/.gitignore new file mode 100644 index 00000000..5598d8a5 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/.gitignore @@ -0,0 +1,12 @@ + +*.test + +*.out + +*.mprof + +.idea + +vendor/github.com/buger/goterm/ +prof.cpu +prof.mem diff --git a/vendor/github.com/buger/jsonparser/.travis.yml b/vendor/github.com/buger/jsonparser/.travis.yml new file mode 100644 index 00000000..dbfb7cf9 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/.travis.yml @@ -0,0 +1,11 @@ +language: go +arch: + - amd64 + - ppc64le +go: + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x +script: go test -v ./. diff --git a/vendor/github.com/buger/jsonparser/Dockerfile b/vendor/github.com/buger/jsonparser/Dockerfile new file mode 100644 index 00000000..37fc9fd0 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/Dockerfile @@ -0,0 +1,12 @@ +FROM golang:1.6 + +RUN go get github.com/Jeffail/gabs +RUN go get github.com/bitly/go-simplejson +RUN go get github.com/pquerna/ffjson +RUN go get github.com/antonholmquist/jason +RUN go get github.com/mreiferson/go-ujson +RUN go get -tags=unsafe -u github.com/ugorji/go/codec +RUN go get github.com/mailru/easyjson + +WORKDIR /go/src/github.com/buger/jsonparser +ADD . 
/go/src/github.com/buger/jsonparser
\ No newline at end of file
diff --git a/vendor/github.com/buger/jsonparser/LICENSE b/vendor/github.com/buger/jsonparser/LICENSE
new file mode 100644
index 00000000..ac25aeb7
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 Leonid Bugaev
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/buger/jsonparser/Makefile b/vendor/github.com/buger/jsonparser/Makefile
new file mode 100644
index 00000000..e843368c
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/Makefile
@@ -0,0 +1,36 @@
+SOURCE = parser.go
+CONTAINER = jsonparser
+SOURCE_PATH = /go/src/github.com/buger/jsonparser
+BENCHMARK = JsonParser
+BENCHTIME = 5s
+TEST = .
+DRUN = docker run -v `pwd`:$(SOURCE_PATH) -i -t $(CONTAINER)
+
+build:
+	docker build -t $(CONTAINER) .
+
+race:
+	$(DRUN) --env GORACE="halt_on_error=1" go test ./. $(ARGS) -v -race -timeout 15s
+
+bench:
+	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -benchtime $(BENCHTIME) -v
+
+bench_local:
+	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench . $(ARGS) -benchtime $(BENCHTIME) -v
+
+profile:
+	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -memprofile mem.mprof -v
+	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -cpuprofile cpu.out -v
+	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -c
+
+test:
+	$(DRUN) go test $(LDFLAGS) ./ -run $(TEST) -timeout 10s $(ARGS) -v
+
+fmt:
+	$(DRUN) go fmt ./...
+
+vet:
+	$(DRUN) go vet ./.
+
+bash:
+	$(DRUN) /bin/bash
\ No newline at end of file
diff --git a/vendor/github.com/buger/jsonparser/README.md b/vendor/github.com/buger/jsonparser/README.md
new file mode 100644
index 00000000..d7e0ec39
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/README.md
@@ -0,0 +1,365 @@
+[![Go Report Card](https://goreportcard.com/badge/github.com/buger/jsonparser)](https://goreportcard.com/report/github.com/buger/jsonparser) ![License](https://img.shields.io/dub/l/vibe-d.svg)
+# Alternative JSON parser for Go (up to 10x faster than the standard library)
+
+It does not require you to know the structure of the payload (e.g. create structs), and allows accessing fields by providing the path to them. It is up to **10 times faster** than the standard `encoding/json` package (depending on payload size and usage) and **allocates no memory**. See benchmarks below.
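As a quick taste of the path-based API described above, here is a minimal, self-contained sketch (the payload is invented for illustration; `Get`'s signature matches the vendored `parser.go` later in this diff):

```go
package main

import (
	"fmt"

	"github.com/buger/jsonparser"
)

func main() {
	// Hypothetical payload, just for this example.
	data := []byte(`{"person":{"name":{"fullName":"Leonid Bugaev"}}}`)

	// Get returns a slice pointing into the original buffer, the value's
	// type, the offset where the value ends, and an error.
	v, dataType, _, err := jsonparser.Get(data, "person", "name", "fullName")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(v), dataType) // Leonid Bugaev string
}
```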
+
+## Rationale
+Originally I made this for a project that relies on a lot of 3rd party APIs that can be unpredictable and complex.
+I love simplicity and prefer to avoid external dependencies. `encoding/json` requires you to know your data structures exactly, and if you prefer to use `map[string]interface{}` instead, it will be very slow and hard to manage.
+I investigated what's on the market and found that most libraries are just wrappers around `encoding/json`; there are a few options with their own parsers (`ffjson`, `easyjson`), but they still require you to create data structures.
+
+
+The goal of this project is to push the JSON parser to its performance limits without sacrificing compliance or developer experience.
+
+## Example
+For the given JSON, our goal is to extract the user's full name, number of GitHub followers and avatar.
+
+```go
+import "github.com/buger/jsonparser"
+
+...
+
+data := []byte(`{
+  "person": {
+    "name": {
+      "first": "Leonid",
+      "last": "Bugaev",
+      "fullName": "Leonid Bugaev"
+    },
+    "github": {
+      "handle": "buger",
+      "followers": 109
+    },
+    "avatars": [
+      { "url": "https://avatars1.githubusercontent.com/u/14009?v=3&s=460", "type": "thumbnail" }
+    ]
+  },
+  "company": {
+    "name": "Acme"
+  }
+}`)
+
+// You can specify the key path by providing arguments to the Get function
+jsonparser.Get(data, "person", "name", "fullName")
+
+// There are `GetInt` and `GetBoolean` helpers if you know the key's data type exactly
+jsonparser.GetInt(data, "person", "github", "followers")
+
+// When you try to get an object, it will return a []byte slice pointing into the data containing it
+// In `company` it will be `{"name": "Acme"}`
+jsonparser.Get(data, "company")
+
+// If the key doesn't exist it will return an error
+var size int64
+if value, err := jsonparser.GetInt(data, "company", "size"); err == nil {
+  size = value
+}
+
+// You can use the `ArrayEach` helper to iterate items [item1, item2 .... itemN]
+jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
+	fmt.Println(jsonparser.Get(value, "url"))
+}, "person", "avatars")
+
+// Or you can access fields by index!
+jsonparser.GetString(data, "person", "avatars", "[0]", "url")
+
+// You can use the `ObjectEach` helper to iterate objects { "key1":object1, "key2":object2, .... "keyN":objectN }
+jsonparser.ObjectEach(data, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
+	fmt.Printf("Key: '%s'\n Value: '%s'\n Type: %s\n", string(key), string(value), dataType)
+	return nil
+}, "person", "name")
+
+// The most efficient way to extract multiple keys is `EachKey`
+
+paths := [][]string{
+  []string{"person", "name", "fullName"},
+  []string{"person", "avatars", "[0]", "url"},
+  []string{"company", "url"},
+}
+jsonparser.EachKey(data, func(idx int, value []byte, vt jsonparser.ValueType, err error){
+  switch idx {
+  case 0: // []string{"person", "name", "fullName"}
+    ...
+  case 1: // []string{"person", "avatars", "[0]", "url"}
+    ...
+  case 2: // []string{"company", "url"}
+    ...
+  }
+}, paths...)
+
+// For more information see docs below
+```
+
+## Need to speed up your app?
+
+I'm available for consulting and can help you push your app performance to the limits. Ping me at: leonsbox@gmail.com.
+
+## Reference
+
+The library API is really simple. You just need the `Get` method to perform any operation. The rest is just helpers around it.
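To make "the rest is just helpers around it" concrete, here is a minimal sketch (an editor's illustration, not part of the vendored code) of how a typed helper can be layered on `Get`, in the spirit of the package's own `GetString`:

```go
package main

import (
	"fmt"

	"github.com/buger/jsonparser"
)

// getStringSketch mirrors the shape of jsonparser.GetString: fetch the raw
// bytes with Get, check the reported type, then unescape into a Go string.
func getStringSketch(data []byte, keys ...string) (string, error) {
	v, t, _, err := jsonparser.Get(data, keys...)
	if err != nil {
		return "", err
	}
	if t != jsonparser.String {
		return "", fmt.Errorf("value is not a string: %s", string(v))
	}
	return jsonparser.ParseString(v) // handles escaped and unicode characters
}

func main() {
	s, err := getStringSketch([]byte(`{"name":"Leonid"}`), "name")
	fmt.Println(s, err) // Leonid <nil>
}
```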
+
+You can also view the API at [godoc.org](https://godoc.org/github.com/buger/jsonparser)
+
+
+### **`Get`**
+```go
+func Get(data []byte, keys ...string) (value []byte, dataType jsonparser.ValueType, offset int, err error)
+```
+Receives the data structure and a key path to extract the value from.
+
+Returns:
+* `value` - Pointer into the original data structure containing the key's value, or just an empty slice if nothing is found or an error occurs
+* `dataType` - Can be: `NotExist`, `String`, `Number`, `Object`, `Array`, `Boolean` or `Null`
+* `offset` - Offset from the provided data structure where the key's value ends. Used mostly internally, for example for the `ArrayEach` helper.
+* `err` - Set if the key is not found or on any other parsing issue. If the key is not found it also sets `dataType` to `NotExist`
+
+Accepts multiple keys to specify the path to a JSON value (in case of querying nested structures).
+If no keys are provided it will try to extract the closest JSON value (simple ones or object/array), useful for reading streams or arrays, see the `ArrayEach` implementation.
+
+Note that keys can be array indexes: `jsonparser.GetInt(data, "person", "avatars", "[0]", "url")`, pretty cool, yeah?
+
+### **`GetString`**
+```go
+func GetString(data []byte, keys ...string) (val string, err error)
+```
+Returns strings, properly handling escaped and Unicode characters. Note that this will cause additional memory allocations.
+
+### **`GetUnsafeString`**
+If you need a string in your app and are ready to sacrifice support for escaped symbols in favor of speed, it returns a string mapped to the existing byte slice memory, without any allocations:
+```go
+s, _ := jsonparser.GetUnsafeString(data, "person", "name", "title")
+switch s {
+  case "CEO":
+    ...
+  case "Engineer":
+    ...
+}
+```
+Note that `unsafe` here means that your string exists only until the GC frees the underlying byte slice; in most cases this means you can use the string only in the current context and should not pass it anywhere externally: through channels or any other way.
+
+
+### **`GetBoolean`**, **`GetInt`** and **`GetFloat`**
+```go
+func GetBoolean(data []byte, keys ...string) (val bool, err error)
+
+func GetFloat(data []byte, keys ...string) (val float64, err error)
+
+func GetInt(data []byte, keys ...string) (val int64, err error)
+```
+If you know the key type, you can use the helpers above.
+If the key's data type does not match, an error is returned.
+
+### **`ArrayEach`**
+```go
+func ArrayEach(data []byte, cb func(value []byte, dataType jsonparser.ValueType, offset int, err error), keys ...string)
+```
+Needed for iterating arrays; accepts a callback function with the same return arguments as `Get`.
+
+### **`ObjectEach`**
+```go
+func ObjectEach(data []byte, callback func(key []byte, value []byte, dataType ValueType, offset int) error, keys ...string) (err error)
+```
+Needed for iterating objects; accepts a callback function. Example:
+```go
+var handler func([]byte, []byte, jsonparser.ValueType, int) error
+handler = func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
+	//do stuff here
+	return nil
+}
+jsonparser.ObjectEach(myJson, handler)
+```
+
+
+### **`EachKey`**
+```go
+func EachKey(data []byte, cb func(idx int, value []byte, dataType jsonparser.ValueType, err error), paths ...[]string)
+```
+When you need to read multiple keys and you are not afraid of a low-level API, `EachKey` is your friend. It reads the payload only a single time, and calls the callback function once a path is found.
For example, when you call `Get` multiple times, it has to process the payload each time you call it. Depending on the payload, `EachKey` can be several times faster than `Get`. Paths can use nested keys as well!
+
+```go
+paths := [][]string{
+  []string{"uuid"},
+  []string{"tz"},
+  []string{"ua"},
+  []string{"st"},
+}
+var data SmallPayload
+
+jsonparser.EachKey(smallFixture, func(idx int, value []byte, vt jsonparser.ValueType, err error){
+  switch idx {
+  case 0:
+    data.Uuid, _ = jsonparser.ParseString(value)
+  case 1:
+    v, _ := jsonparser.ParseInt(value)
+    data.Tz = int(v)
+  case 2:
+    data.Ua, _ = jsonparser.ParseString(value)
+  case 3:
+    v, _ := jsonparser.ParseInt(value)
+    data.St = int(v)
+  }
+}, paths...)
+```
+
+### **`Set`**
+```go
+func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error)
+```
+Receives an existing data structure, a key path to set, and a value to set at that key. *This functionality is experimental.*
+
+Returns:
+* `value` - Pointer to the original data structure with the updated or added key value.
+* `err` - Set on any parsing issue.
+
+Accepts multiple keys to specify the path to a JSON value (in case of updating or creating nested structures).
+
+Note that keys can be array indexes: `jsonparser.Set(data, []byte("http://github.com"), "person", "avatars", "[0]", "url")`
+
+### **`Delete`**
+```go
+func Delete(data []byte, keys ...string) (value []byte)
+```
+Receives an existing data structure and a key path to delete. *This functionality is experimental.*
+
+Returns:
+* `value` - Pointer to the original data structure with the key path deleted if it can be found. If there is no key path, then the whole data structure is deleted.
+
+Accepts multiple keys to specify the path to a JSON value (in case of updating or creating nested structures).
+
+Note that keys can be array indexes: `jsonparser.Delete(data, "person", "avatars", "[0]", "url")`
+
+
+## What makes it so fast?
+* It does not rely on `encoding/json`, `reflection` or `interface{}`; the only real package dependency is `bytes`.
+* Operates on the JSON payload at byte level, providing you pointers into the original data structure: no memory allocation.
+* No automatic type conversions: by default everything is a []byte, but it provides the value type, so you can convert it yourself (there are a few helpers included).
+* Does not parse the full record, only the keys you specify
+
+
+## Benchmarks
+
+There are 3 benchmark types, trying to simulate real-life usage for small, medium and large JSON payloads.
+For each metric, the lower value is better. Time/op is in nanoseconds. Values better than standard encoding/json are marked in bold.
+Benchmarks were run on a standard Linode 1024 box.
+
+Compared libraries:
+* https://golang.org/pkg/encoding/json
+* https://github.com/Jeffail/gabs
+* https://github.com/a8m/djson
+* https://github.com/bitly/go-simplejson
+* https://github.com/antonholmquist/jason
+* https://github.com/mreiferson/go-ujson
+* https://github.com/ugorji/go/codec
+* https://github.com/pquerna/ffjson
+* https://github.com/mailru/easyjson
+* https://github.com/buger/jsonparser
+
+#### TLDR
+If you want to skip the next sections, we have 2 winners: `jsonparser` and `easyjson`.
+`jsonparser` is up to 10 times faster than the standard `encoding/json` package (depending on payload size and usage), and almost infinitely (literally) better in memory consumption because it operates on the data at byte level and provides direct slice pointers.
+`easyjson` wins on CPU in the medium tests and frankly I'm impressed with this package: these are remarkable results considering that it is almost a drop-in replacement for `encoding/json` (it requires some code generation).
+
+It's hard to fully compare `jsonparser` and `easyjson` (or `ffjson`): the latter are true parsers and fully process the record, unlike `jsonparser`, which parses only the keys you specify.
+
+If you are searching for a replacement for `encoding/json` while keeping structs, `easyjson` is an amazing choice. If you want to process dynamic JSON, have memory constraints, or want more control over your data, you should try `jsonparser`.
+
+`jsonparser` performance heavily depends on usage, and it works best when you do not need to process the full record, only some keys. The more calls you need to make, the slower it will be; in contrast, `easyjson` (or `ffjson`, `encoding/json`) parses the record only once, and then you can make as many calls as you want.
+
+With great power comes great responsibility! :)
+
+
+#### Small payload
+
+Each test processes 190 bytes of http log as a JSON record.
+It should read multiple fields.
+https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_small_payload_test.go
+
+Library | time/op | bytes/op | allocs/op
+ ------ | ------- | -------- | -------
+encoding/json struct | 7879 | 880 | 18
+encoding/json interface{} | 8946 | 1521 | 38
+Jeffail/gabs | 10053 | 1649 | 46
+bitly/go-simplejson | 10128 | 2241 | 36
+antonholmquist/jason | 27152 | 7237 | 101
+github.com/ugorji/go/codec | 8806 | 2176 | 31
+mreiferson/go-ujson | **7008** | **1409** | 37
+a8m/djson | 3862 | 1249 | 30
+pquerna/ffjson | **3769** | **624** | **15**
+mailru/easyjson | **2002** | **192** | **9**
+buger/jsonparser | **1367** | **0** | **0**
+buger/jsonparser (EachKey API) | **809** | **0** | **0**
+
+Winners are ffjson, easyjson and jsonparser, where jsonparser is up to 9.8x faster than encoding/json, 4.6x faster than ffjson, and slightly faster than easyjson.
+If you look at memory allocations, jsonparser has no rivals, as it makes no data copies and operates on the raw []byte structure with pointers into it.
+
+#### Medium payload
+
+Each test processes a 2.4kb JSON record (based on Clearbit API).
+It should read multiple nested fields and 1 array.
+
+https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_medium_payload_test.go
+
+| Library | time/op | bytes/op | allocs/op |
+| ------- | ------- | -------- | --------- |
+| encoding/json struct | 57749 | 1336 | 29 |
+| encoding/json interface{} | 79297 | 10627 | 215 |
+| Jeffail/gabs | 83807 | 11202 | 235 |
+| bitly/go-simplejson | 88187 | 17187 | 220 |
+| antonholmquist/jason | 94099 | 19013 | 247 |
+| github.com/ugorji/go/codec | 114719 | 6712 | 152 |
+| mreiferson/go-ujson | **56972** | 11547 | 270 |
+| a8m/djson | 28525 | 10196 | 198 |
+| pquerna/ffjson | **20298** | **856** | **20** |
+| mailru/easyjson | **10512** | **336** | **12** |
+| buger/jsonparser | **15955** | **0** | **0** |
+| buger/jsonparser (EachKey API) | **8916** | **0** | **0** |
+
+The difference between ffjson and jsonparser in CPU usage is smaller, while the memory consumption difference is growing. On the other hand `easyjson` shows remarkable performance for the medium payload.
+
+`gabs`, `go-simplejson` and `jason` are based on encoding/json and map[string]interface{} and are actually only helpers for unstructured JSON; their performance correlates with `encoding/json interface{}`, so they will skip the next round.
+`go-ujson`, while it has its own parser, shows the same performance as `encoding/json` and also skips the next round. Same situation with `ugorji/go/codec`, which showed unexpectedly bad performance for complex payloads.
+
+
+#### Large payload
+
+Each test processes a 24kb JSON record (based on Discourse API).
+It should read 2 arrays, and for each item in the array get a few fields.
+Basically it means processing the full JSON file.
+
+https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_large_payload_test.go
+
+| Library | time/op | bytes/op | allocs/op |
+| --- | --- | --- | --- |
+| encoding/json struct | 748336 | 8272 | 307 |
+| encoding/json interface{} | 1224271 | 215425 | 3395 |
+| a8m/djson | 510082 | 213682 | 2845 |
+| pquerna/ffjson | **312271** | **7792** | **298** |
+| mailru/easyjson | **154186** | **6992** | **288** |
+| buger/jsonparser | **85308** | **0** | **0** |
+
+`jsonparser` is now the winner, but do not forget that it is a much more lightweight parser than `ffjson` or `easyjson`: they have to parse all the data, while `jsonparser` parses only what you need. `ffjson`, `easyjson` and `jsonparser` all have their own parsing code and do not depend on `encoding/json` or `interface{}`; that's one of the reasons why they are so fast. `easyjson` also uses a bit of the `unsafe` package to reduce memory consumption (in theory this can lead to some unexpected GC issues, but I have not tested it enough).
+
+Also, the last benchmark did not include an `EachKey` test, because in this particular case we need to read a lot of array values, and using `ArrayEach` is more efficient.
+
+## Questions and support
+
+All bug reports and suggestions should go through GitHub Issues.
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (git checkout -b my-new-feature)
+3. Commit your changes (git commit -am 'Added some feature')
+4. Push to the branch (git push origin my-new-feature)
+5. Create new Pull Request
+
+## Development
+
+All my development happens using Docker, and the repo includes some Make tasks to simplify development.
+
+* `make build` - builds the docker image, usually needs to be run only once
+* `make test` - run tests
+* `make fmt` - run go fmt
+* `make bench` - run benchmarks (if you need to run only a single benchmark, modify the `BENCHMARK` variable in the Makefile)
+* `make profile` - runs the benchmarks and generates 3 files (`cpu.out`, `mem.mprof` and the `benchmark.test` binary) which can be used with `go tool pprof`
+* `make bash` - enter the container (I use it for running `go tool pprof` above)
diff --git a/vendor/github.com/buger/jsonparser/bytes.go b/vendor/github.com/buger/jsonparser/bytes.go
new file mode 100644
index 00000000..0bb0ff39
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/bytes.go
@@ -0,0 +1,47 @@
+package jsonparser
+
+import (
+	bio "bytes"
+)
+
+// minInt64 '-9223372036854775808' is the smallest representable number in int64; the constant stores its digits without the sign
+const minInt64 = `9223372036854775808`
+
+// About 2x faster than strconv.ParseInt because it only supports base 10, which is enough for JSON
+func parseInt(bytes []byte) (v int64, ok bool, overflow bool) {
+	if len(bytes) == 0 {
+		return 0, false, false
+	}
+
+	var neg bool = false
+	if bytes[0] == '-' {
+		neg = true
+		bytes = bytes[1:]
+	}
+
+	var b int64 = 0
+	for _, c := range bytes {
+		if c >= '0' && c <= '9' {
+			b = (10 * v) + int64(c-'0')
+		} else {
+			return 0, false, false
+		}
+		if overflow = (b < v); overflow {
+			break
+		}
+		v = b
+	}
+
+	if overflow {
+		if neg && bio.Equal(bytes, []byte(minInt64)) {
+			return b, true, false
+		}
+		return 0, false, true
+	}
+
+	if neg {
+		return -v, true, false
+	} else {
+		return v, true, false
+	}
+}
diff --git a/vendor/github.com/buger/jsonparser/bytes_safe.go b/vendor/github.com/buger/jsonparser/bytes_safe.go
new file mode 100644
index 00000000..ff16a4a1
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/bytes_safe.go
@@ -0,0 +1,25 @@
+// +build appengine appenginevm
+
+package jsonparser
+
+import (
+	"strconv"
+)
+
+// See bytes_unsafe.go for the explanation of why *[]byte is used (signatures must be consistent with those in that file)
+
+func equalStr(b *[]byte, s string) bool {
+	return string(*b) == s
+}
+
+func parseFloat(b *[]byte) (float64, error) {
+	return strconv.ParseFloat(string(*b), 64)
+}
+
+func bytesToString(b *[]byte) string {
+	return string(*b)
+}
+
+func StringToBytes(s string) []byte {
+	return []byte(s)
+}
diff --git a/vendor/github.com/buger/jsonparser/bytes_unsafe.go b/vendor/github.com/buger/jsonparser/bytes_unsafe.go
new file mode 100644
index 00000000..589fea87
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/bytes_unsafe.go
@@ -0,0 +1,44 @@
+// +build !appengine,!appenginevm
+
+package jsonparser
+
+import (
+	"reflect"
+	"strconv"
+	"unsafe"
+	"runtime"
+)
+
+//
+// The reason for using *[]byte rather than []byte in parameters is an optimization. As of Go 1.6,
+// the compiler cannot perfectly inline the function when using a non-pointer slice. That is,
+// the non-pointer []byte parameter version is slower than if its function body is manually
+// inlined, whereas the pointer []byte version is equally fast to the manually inlined
+// version. Instruction count in assembly taken from "go tool compile" confirms this difference.
+//
+// TODO: Remove hack after Go 1.7 release
+//
+func equalStr(b *[]byte, s string) bool {
+	return *(*string)(unsafe.Pointer(b)) == s
+}
+
+func parseFloat(b *[]byte) (float64, error) {
+	return strconv.ParseFloat(*(*string)(unsafe.Pointer(b)), 64)
+}
+
+// A hack until issue golang/go#2632 is fixed.
+// See: https://github.com/golang/go/issues/2632 +func bytesToString(b *[]byte) string { + return *(*string)(unsafe.Pointer(b)) +} + +func StringToBytes(s string) []byte { + b := make([]byte, 0, 0) + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + bh.Data = sh.Data + bh.Cap = sh.Len + bh.Len = sh.Len + runtime.KeepAlive(s) + return b +} diff --git a/vendor/github.com/buger/jsonparser/escape.go b/vendor/github.com/buger/jsonparser/escape.go new file mode 100644 index 00000000..49669b94 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/escape.go @@ -0,0 +1,173 @@ +package jsonparser + +import ( + "bytes" + "unicode/utf8" +) + +// JSON Unicode stuff: see https://tools.ietf.org/html/rfc7159#section-7 + +const supplementalPlanesOffset = 0x10000 +const highSurrogateOffset = 0xD800 +const lowSurrogateOffset = 0xDC00 + +const basicMultilingualPlaneReservedOffset = 0xDFFF +const basicMultilingualPlaneOffset = 0xFFFF + +func combineUTF16Surrogates(high, low rune) rune { + return supplementalPlanesOffset + (high-highSurrogateOffset)<<10 + (low - lowSurrogateOffset) +} + +const badHex = -1 + +func h2I(c byte) int { + switch { + case c >= '0' && c <= '9': + return int(c - '0') + case c >= 'A' && c <= 'F': + return int(c - 'A' + 10) + case c >= 'a' && c <= 'f': + return int(c - 'a' + 10) + } + return badHex +} + +// decodeSingleUnicodeEscape decodes a single \uXXXX escape sequence. The prefix \u is assumed to be present and +// is not checked. +// In JSON, these escapes can either come alone or as part of "UTF16 surrogate pairs" that must be handled together. +// This function only handles one; decodeUnicodeEscape handles this more complex case. +func decodeSingleUnicodeEscape(in []byte) (rune, bool) { + // We need at least 6 characters total + if len(in) < 6 { + return utf8.RuneError, false + } + + // Convert hex to decimal + h1, h2, h3, h4 := h2I(in[2]), h2I(in[3]), h2I(in[4]), h2I(in[5]) + if h1 == badHex || h2 == badHex || h3 == badHex || h4 == badHex { + return utf8.RuneError, false + } + + // Compose the hex digits + return rune(h1<<12 + h2<<8 + h3<<4 + h4), true +} + +// isUTF16EncodedRune checks if a rune is in the range for non-BMP characters, +// which is used to describe UTF16 chars. 
+// Source: https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
+func isUTF16EncodedRune(r rune) bool {
+	return highSurrogateOffset <= r && r <= basicMultilingualPlaneReservedOffset
+}
+
+func decodeUnicodeEscape(in []byte) (rune, int) {
+	if r, ok := decodeSingleUnicodeEscape(in); !ok {
+		// Invalid Unicode escape
+		return utf8.RuneError, -1
+	} else if r <= basicMultilingualPlaneOffset && !isUTF16EncodedRune(r) {
+		// Valid Unicode escape in Basic Multilingual Plane
+		return r, 6
+	} else if r2, ok := decodeSingleUnicodeEscape(in[6:]); !ok { // Note: previous decodeSingleUnicodeEscape success guarantees at least 6 bytes remain
+		// UTF16 "high surrogate" without the mandatory valid following Unicode escape for the "low surrogate"
+		return utf8.RuneError, -1
+	} else if r2 < lowSurrogateOffset {
+		// Invalid UTF16 "low surrogate"
+		return utf8.RuneError, -1
+	} else {
+		// Valid UTF16 surrogate pair
+		return combineUTF16Surrogates(r, r2), 12
+	}
+}
+
+// backslashCharEscapeTable: when '\X' is found for some byte X, it is to be replaced with backslashCharEscapeTable[X]
+var backslashCharEscapeTable = [...]byte{
+	'"':  '"',
+	'\\': '\\',
+	'/':  '/',
+	'b':  '\b',
+	'f':  '\f',
+	'n':  '\n',
+	'r':  '\r',
+	't':  '\t',
+}
+
+// unescapeToUTF8 unescapes the single escape sequence starting at 'in' into 'out' and returns
+// how many characters were consumed from 'in' and emitted into 'out'.
+// If a valid escape sequence does not appear as a prefix of 'in', (-1, -1) is returned to signal the error.
+func unescapeToUTF8(in, out []byte) (inLen int, outLen int) {
+	if len(in) < 2 || in[0] != '\\' {
+		// Invalid escape due to insufficient characters for any escape or no initial backslash
+		return -1, -1
+	}
+
+	// https://tools.ietf.org/html/rfc7159#section-7
+	switch e := in[1]; e {
+	case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
+		// Valid basic 2-character escapes (use lookup table)
+		out[0] = backslashCharEscapeTable[e]
+		return 2, 1
+	case 'u':
+		// Unicode escape
+		if r, inLen := decodeUnicodeEscape(in); inLen == -1 {
+			// Invalid Unicode escape
+			return -1, -1
+		} else {
+			// Valid Unicode escape; re-encode as UTF8
+			outLen := utf8.EncodeRune(out, r)
+			return inLen, outLen
+		}
+	}
+
+	return -1, -1
+}
+
+// Unescape unescapes the string contained in 'in' and returns it as a slice.
+// If 'in' contains no escaped characters:
+//   Returns 'in'.
+// Else, if 'out' is of sufficient capacity (guaranteed if cap(out) >= len(in)):
+//   'out' is used to build the unescaped string and is returned with no extra allocation
+// Else:
+//   A new slice is allocated and returned.
+func Unescape(in, out []byte) ([]byte, error) { + firstBackslash := bytes.IndexByte(in, '\\') + if firstBackslash == -1 { + return in, nil + } + + // Get a buffer of sufficient size (allocate if needed) + if cap(out) < len(in) { + out = make([]byte, len(in)) + } else { + out = out[0:len(in)] + } + + // Copy the first sequence of unescaped bytes to the output and obtain a buffer pointer (subslice) + copy(out, in[:firstBackslash]) + in = in[firstBackslash:] + buf := out[firstBackslash:] + + for len(in) > 0 { + // Unescape the next escaped character + inLen, bufLen := unescapeToUTF8(in, buf) + if inLen == -1 { + return nil, MalformedStringEscapeError + } + + in = in[inLen:] + buf = buf[bufLen:] + + // Copy everything up until the next backslash + nextBackslash := bytes.IndexByte(in, '\\') + if nextBackslash == -1 { + copy(buf, in) + buf = buf[len(in):] + break + } else { + copy(buf, in[:nextBackslash]) + buf = buf[nextBackslash:] + in = in[nextBackslash:] + } + } + + // Trim the out buffer to the amount that was actually emitted + return out[:len(out)-len(buf)], nil +} diff --git a/vendor/github.com/buger/jsonparser/fuzz.go b/vendor/github.com/buger/jsonparser/fuzz.go new file mode 100644 index 00000000..854bd11b --- /dev/null +++ b/vendor/github.com/buger/jsonparser/fuzz.go @@ -0,0 +1,117 @@ +package jsonparser + +func FuzzParseString(data []byte) int { + r, err := ParseString(data) + if err != nil || r == "" { + return 0 + } + return 1 +} + +func FuzzEachKey(data []byte) int { + paths := [][]string{ + {"name"}, + {"order"}, + {"nested", "a"}, + {"nested", "b"}, + {"nested2", "a"}, + {"nested", "nested3", "b"}, + {"arr", "[1]", "b"}, + {"arrInt", "[3]"}, + {"arrInt", "[5]"}, + {"nested"}, + {"arr", "["}, + {"a\n", "b\n"}, + } + EachKey(data, func(idx int, value []byte, vt ValueType, err error) {}, paths...) 
+ return 1 +} + +func FuzzDelete(data []byte) int { + Delete(data, "test") + return 1 +} + +func FuzzSet(data []byte) int { + _, err := Set(data, []byte(`"new value"`), "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzObjectEach(data []byte) int { + _ = ObjectEach(data, func(key, value []byte, valueType ValueType, off int) error { + return nil + }) + return 1 +} + +func FuzzParseFloat(data []byte) int { + _, err := ParseFloat(data) + if err != nil { + return 0 + } + return 1 +} + +func FuzzParseInt(data []byte) int { + _, err := ParseInt(data) + if err != nil { + return 0 + } + return 1 +} + +func FuzzParseBool(data []byte) int { + _, err := ParseBoolean(data) + if err != nil { + return 0 + } + return 1 +} + +func FuzzTokenStart(data []byte) int { + _ = tokenStart(data) + return 1 +} + +func FuzzGetString(data []byte) int { + _, err := GetString(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetFloat(data []byte) int { + _, err := GetFloat(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetInt(data []byte) int { + _, err := GetInt(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetBoolean(data []byte) int { + _, err := GetBoolean(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetUnsafeString(data []byte) int { + _, err := GetUnsafeString(data, "test") + if err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/buger/jsonparser/oss-fuzz-build.sh b/vendor/github.com/buger/jsonparser/oss-fuzz-build.sh new file mode 100644 index 00000000..c573b0e2 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/oss-fuzz-build.sh @@ -0,0 +1,47 @@ +#!/bin/bash -eu + +git clone https://github.com/dvyukov/go-fuzz-corpus +zip corpus.zip go-fuzz-corpus/json/corpus/* + +cp corpus.zip $OUT/fuzzparsestring_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseString fuzzparsestring + +cp corpus.zip $OUT/fuzzeachkey_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzEachKey fuzzeachkey + +cp corpus.zip $OUT/fuzzdelete_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzDelete fuzzdelete + +cp corpus.zip $OUT/fuzzset_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzSet fuzzset + +cp corpus.zip $OUT/fuzzobjecteach_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzObjectEach fuzzobjecteach + +cp corpus.zip $OUT/fuzzparsefloat_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseFloat fuzzparsefloat + +cp corpus.zip $OUT/fuzzparseint_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseInt fuzzparseint + +cp corpus.zip $OUT/fuzzparsebool_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseBool fuzzparsebool + +cp corpus.zip $OUT/fuzztokenstart_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzTokenStart fuzztokenstart + +cp corpus.zip $OUT/fuzzgetstring_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetString fuzzgetstring + +cp corpus.zip $OUT/fuzzgetfloat_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetFloat fuzzgetfloat + +cp corpus.zip $OUT/fuzzgetint_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetInt fuzzgetint + +cp corpus.zip $OUT/fuzzgetboolean_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetBoolean fuzzgetboolean + +cp corpus.zip $OUT/fuzzgetunsafestring_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser 
FuzzGetUnsafeString fuzzgetunsafestring + diff --git a/vendor/github.com/buger/jsonparser/parser.go b/vendor/github.com/buger/jsonparser/parser.go new file mode 100644 index 00000000..14b80bc4 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/parser.go @@ -0,0 +1,1283 @@ +package jsonparser + +import ( + "bytes" + "errors" + "fmt" + "strconv" +) + +// Errors +var ( + KeyPathNotFoundError = errors.New("Key path not found") + UnknownValueTypeError = errors.New("Unknown value type") + MalformedJsonError = errors.New("Malformed JSON error") + MalformedStringError = errors.New("Value is string, but can't find closing '\"' symbol") + MalformedArrayError = errors.New("Value is array, but can't find closing ']' symbol") + MalformedObjectError = errors.New("Value looks like object, but can't find closing '}' symbol") + MalformedValueError = errors.New("Value looks like Number/Boolean/None, but can't find its end: ',' or '}' symbol") + OverflowIntegerError = errors.New("Value is number, but overflowed while parsing") + MalformedStringEscapeError = errors.New("Encountered an invalid escape sequence in a string") +) + +// How much stack space to allocate for unescaping JSON strings; if a string longer +// than this needs to be escaped, it will result in a heap allocation +const unescapeStackBufSize = 64 + +func tokenEnd(data []byte) int { + for i, c := range data { + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + return i + } + } + + return len(data) +} + +func findTokenStart(data []byte, token byte) int { + for i := len(data) - 1; i >= 0; i-- { + switch data[i] { + case token: + return i + case '[', '{': + return 0 + } + } + + return 0 +} + +func findKeyStart(data []byte, key string) (int, error) { + i := 0 + ln := len(data) + if ln > 0 && (data[0] == '{' || data[0] == '[') { + i = 1 + } + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + + if ku, err := Unescape(StringToBytes(key), stackbuf[:]); err == nil { + key = bytesToString(&ku) + } + + for i < ln { + switch data[i] { + case '"': + i++ + keyBegin := i + + strEnd, keyEscaped := stringEnd(data[i:]) + if strEnd == -1 { + break + } + i += strEnd + keyEnd := i - 1 + + valueOffset := nextToken(data[i:]) + if valueOffset == -1 { + break + } + + i += valueOffset + + // if string is a key, and key level match + k := data[keyBegin:keyEnd] + // for unescape: if there are no escape sequences, this is cheap; if there are, it is a + // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize + if keyEscaped { + if ku, err := Unescape(k, stackbuf[:]); err != nil { + break + } else { + k = ku + } + } + + if data[i] == ':' && len(key) == len(k) && bytesToString(&k) == key { + return keyBegin - 1, nil + } + + case '[': + end := blockEnd(data[i:], data[i], ']') + if end != -1 { + i = i + end + } + case '{': + end := blockEnd(data[i:], data[i], '}') + if end != -1 { + i = i + end + } + } + i++ + } + + return -1, KeyPathNotFoundError +} + +func tokenStart(data []byte) int { + for i := len(data) - 1; i >= 0; i-- { + switch data[i] { + case '\n', '\r', '\t', ',', '{', '[': + return i + } + } + + return 0 +} + +// Find position of next character which is not whitespace +func nextToken(data []byte) int { + for i, c := range data { + switch c { + case ' ', '\n', '\r', '\t': + continue + default: + return i + } + } + + return -1 +} + +// Find position of last character which is not whitespace +func lastToken(data []byte) int { + for i := len(data) - 1; i >= 0; 
i-- { + switch data[i] { + case ' ', '\n', '\r', '\t': + continue + default: + return i + } + } + + return -1 +} + +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func stringEnd(data []byte) (int, bool) { + escaped := false + for i, c := range data { + if c == '"' { + if !escaped { + return i + 1, false + } else { + j := i - 1 + for { + if j < 0 || data[j] != '\\' { + return i + 1, true // even number of backslashes + } + j-- + if j < 0 || data[j] != '\\' { + break // odd number of backslashes + } + j-- + + } + } + } else if c == '\\' { + escaped = true + } + } + + return -1, escaped +} + +// Find end of the data structure, array or object. +// For array openSym and closeSym will be '[' and ']', for object '{' and '}' +func blockEnd(data []byte, openSym byte, closeSym byte) int { + level := 0 + i := 0 + ln := len(data) + + for i < ln { + switch data[i] { + case '"': // If inside string, skip it + se, _ := stringEnd(data[i+1:]) + if se == -1 { + return -1 + } + i += se + case openSym: // If open symbol, increase level + level++ + case closeSym: // If close symbol, increase level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + return i + 1 + } + } + i++ + } + + return -1 +} + +func searchKeys(data []byte, keys ...string) int { + keyLevel := 0 + level := 0 + i := 0 + ln := len(data) + lk := len(keys) + lastMatched := true + + if lk == 0 { + return 0 + } + + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + + for i < ln { + switch data[i] { + case '"': + i++ + keyBegin := i + + strEnd, keyEscaped := stringEnd(data[i:]) + if strEnd == -1 { + return -1 + } + i += strEnd + keyEnd := i - 1 + + valueOffset := nextToken(data[i:]) + if valueOffset == -1 { + return -1 + } + + i += valueOffset + + // if string is a key + if data[i] == ':' { + if level < 1 { + return -1 + } + + key := data[keyBegin:keyEnd] + + // for unescape: if there are no escape sequences, this is cheap; if there are, it is a + // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize + var keyUnesc []byte + if !keyEscaped { + keyUnesc = key + } else if ku, err := Unescape(key, stackbuf[:]); err != nil { + return -1 + } else { + keyUnesc = ku + } + + if level <= len(keys) { + if equalStr(&keyUnesc, keys[level-1]) { + lastMatched = true + + // if key level match + if keyLevel == level-1 { + keyLevel++ + // If we found all keys in path + if keyLevel == lk { + return i + 1 + } + } + } else { + lastMatched = false + } + } else { + return -1 + } + } else { + i-- + } + case '{': + + // in case parent key is matched then only we will increase the level otherwise can directly + // can move to the end of this block + if !lastMatched { + end := blockEnd(data[i:], '{', '}') + if end == -1 { + return -1 + } + i += end - 1 + } else { + level++ + } + case '}': + level-- + if level == keyLevel { + keyLevel-- + } + case '[': + // If we want to get array element by index + if keyLevel == level && keys[level][0] == '[' { + var keyLen = len(keys[level]) + if keyLen < 3 || keys[level][0] != '[' || keys[level][keyLen-1] != ']' { + return -1 + } + aIdx, err := strconv.Atoi(keys[level][1 : keyLen-1]) + if err != nil { + return -1 + } + var curIdx int + var valueFound []byte + var valueOffset int + var curI = i + ArrayEach(data[i:], func(value []byte, dataType ValueType, offset int, err error) { + if curIdx == aIdx { + valueFound = value + valueOffset = offset + if dataType == 
String { + valueOffset = valueOffset - 2 + valueFound = data[curI+valueOffset : curI+valueOffset+len(value)+2] + } + } + curIdx += 1 + }) + + if valueFound == nil { + return -1 + } else { + subIndex := searchKeys(valueFound, keys[level+1:]...) + if subIndex < 0 { + return -1 + } + return i + valueOffset + subIndex + } + } else { + // Do not search for keys inside arrays + if arraySkip := blockEnd(data[i:], '[', ']'); arraySkip == -1 { + return -1 + } else { + i += arraySkip - 1 + } + } + case ':': // If encountered, JSON data is malformed + return -1 + } + + i++ + } + + return -1 +} + +func sameTree(p1, p2 []string) bool { + minLen := len(p1) + if len(p2) < minLen { + minLen = len(p2) + } + + for pi_1, p_1 := range p1[:minLen] { + if p2[pi_1] != p_1 { + return false + } + } + + return true +} + +func EachKey(data []byte, cb func(int, []byte, ValueType, error), paths ...[]string) int { + var x struct{} + pathFlags := make([]bool, len(paths)) + var level, pathsMatched, i int + ln := len(data) + + var maxPath int + for _, p := range paths { + if len(p) > maxPath { + maxPath = len(p) + } + } + + pathsBuf := make([]string, maxPath) + + for i < ln { + switch data[i] { + case '"': + i++ + keyBegin := i + + strEnd, keyEscaped := stringEnd(data[i:]) + if strEnd == -1 { + return -1 + } + i += strEnd + + keyEnd := i - 1 + + valueOffset := nextToken(data[i:]) + if valueOffset == -1 { + return -1 + } + + i += valueOffset + + // if string is a key, and key level match + if data[i] == ':' { + match := -1 + key := data[keyBegin:keyEnd] + + // for unescape: if there are no escape sequences, this is cheap; if there are, it is a + // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize + var keyUnesc []byte + if !keyEscaped { + keyUnesc = key + } else { + var stackbuf [unescapeStackBufSize]byte + if ku, err := Unescape(key, stackbuf[:]); err != nil { + return -1 + } else { + keyUnesc = ku + } + } + + if maxPath >= level { + if level < 1 { + cb(-1, nil, Unknown, MalformedJsonError) + return -1 + } + + pathsBuf[level-1] = bytesToString(&keyUnesc) + for pi, p := range paths { + if len(p) != level || pathFlags[pi] || !equalStr(&keyUnesc, p[level-1]) || !sameTree(p, pathsBuf[:level]) { + continue + } + + match = pi + + pathsMatched++ + pathFlags[pi] = true + + v, dt, _, e := Get(data[i+1:]) + cb(pi, v, dt, e) + + if pathsMatched == len(paths) { + break + } + } + if pathsMatched == len(paths) { + return i + } + } + + if match == -1 { + tokenOffset := nextToken(data[i+1:]) + i += tokenOffset + + if data[i] == '{' { + blockSkip := blockEnd(data[i:], '{', '}') + i += blockSkip + 1 + } + } + + if i < ln { + switch data[i] { + case '{', '}', '[', '"': + i-- + } + } + } else { + i-- + } + case '{': + level++ + case '}': + level-- + case '[': + var ok bool + arrIdxFlags := make(map[int]struct{}) + pIdxFlags := make([]bool, len(paths)) + + if level < 0 { + cb(-1, nil, Unknown, MalformedJsonError) + return -1 + } + + for pi, p := range paths { + if len(p) < level+1 || pathFlags[pi] || p[level][0] != '[' || !sameTree(p, pathsBuf[:level]) { + continue + } + if len(p[level]) >= 2 { + aIdx, _ := strconv.Atoi(p[level][1 : len(p[level])-1]) + arrIdxFlags[aIdx] = x + pIdxFlags[pi] = true + } + } + + if len(arrIdxFlags) > 0 { + level++ + + var curIdx int + arrOff, _ := ArrayEach(data[i:], func(value []byte, dataType ValueType, offset int, err error) { + if _, ok = arrIdxFlags[curIdx]; ok { + for pi, p := range paths { + if pIdxFlags[pi] { + aIdx, _ := strconv.Atoi(p[level-1][1 : len(p[level-1])-1]) 
+ + if curIdx == aIdx { + of := searchKeys(value, p[level:]...) + + pathsMatched++ + pathFlags[pi] = true + + if of != -1 { + v, dt, _, e := Get(value[of:]) + cb(pi, v, dt, e) + } + } + } + } + } + + curIdx += 1 + }) + + if pathsMatched == len(paths) { + return i + } + + i += arrOff - 1 + } else { + // Do not search for keys inside arrays + if arraySkip := blockEnd(data[i:], '[', ']'); arraySkip == -1 { + return -1 + } else { + i += arraySkip - 1 + } + } + case ']': + level-- + } + + i++ + } + + return -1 +} + +// Data types available in valid JSON data. +type ValueType int + +const ( + NotExist = ValueType(iota) + String + Number + Object + Array + Boolean + Null + Unknown +) + +func (vt ValueType) String() string { + switch vt { + case NotExist: + return "non-existent" + case String: + return "string" + case Number: + return "number" + case Object: + return "object" + case Array: + return "array" + case Boolean: + return "boolean" + case Null: + return "null" + default: + return "unknown" + } +} + +var ( + trueLiteral = []byte("true") + falseLiteral = []byte("false") + nullLiteral = []byte("null") +) + +func createInsertComponent(keys []string, setValue []byte, comma, object bool) []byte { + isIndex := string(keys[0][0]) == "[" + offset := 0 + lk := calcAllocateSpace(keys, setValue, comma, object) + buffer := make([]byte, lk, lk) + if comma { + offset += WriteToBuffer(buffer[offset:], ",") + } + if isIndex && !comma { + offset += WriteToBuffer(buffer[offset:], "[") + } else { + if object { + offset += WriteToBuffer(buffer[offset:], "{") + } + if !isIndex { + offset += WriteToBuffer(buffer[offset:], "\"") + offset += WriteToBuffer(buffer[offset:], keys[0]) + offset += WriteToBuffer(buffer[offset:], "\":") + } + } + + for i := 1; i < len(keys); i++ { + if string(keys[i][0]) == "[" { + offset += WriteToBuffer(buffer[offset:], "[") + } else { + offset += WriteToBuffer(buffer[offset:], "{\"") + offset += WriteToBuffer(buffer[offset:], keys[i]) + offset += WriteToBuffer(buffer[offset:], "\":") + } + } + offset += WriteToBuffer(buffer[offset:], string(setValue)) + for i := len(keys) - 1; i > 0; i-- { + if string(keys[i][0]) == "[" { + offset += WriteToBuffer(buffer[offset:], "]") + } else { + offset += WriteToBuffer(buffer[offset:], "}") + } + } + if isIndex && !comma { + offset += WriteToBuffer(buffer[offset:], "]") + } + if object && !isIndex { + offset += WriteToBuffer(buffer[offset:], "}") + } + return buffer +} + +func calcAllocateSpace(keys []string, setValue []byte, comma, object bool) int { + isIndex := string(keys[0][0]) == "[" + lk := 0 + if comma { + // , + lk += 1 + } + if isIndex && !comma { + // [] + lk += 2 + } else { + if object { + // { + lk += 1 + } + if !isIndex { + // "keys[0]" + lk += len(keys[0]) + 3 + } + } + + + lk += len(setValue) + for i := 1; i < len(keys); i++ { + if string(keys[i][0]) == "[" { + // [] + lk += 2 + } else { + // {"keys[i]":setValue} + lk += len(keys[i]) + 5 + } + } + + if object && !isIndex { + // } + lk += 1 + } + + return lk +} + +func WriteToBuffer(buffer []byte, str string) int { + copy(buffer, str) + return len(str) +} + +/* + +Del - Receives existing data structure, path to delete. 
+ +Returns: +`data` - return modified data + +*/ +func Delete(data []byte, keys ...string) []byte { + lk := len(keys) + if lk == 0 { + return data[:0] + } + + array := false + if len(keys[lk-1]) > 0 && string(keys[lk-1][0]) == "[" { + array = true + } + + var startOffset, keyOffset int + endOffset := len(data) + var err error + if !array { + if len(keys) > 1 { + _, _, startOffset, endOffset, err = internalGet(data, keys[:lk-1]...) + if err == KeyPathNotFoundError { + // problem parsing the data + return data + } + } + + keyOffset, err = findKeyStart(data[startOffset:endOffset], keys[lk-1]) + if err == KeyPathNotFoundError { + // problem parsing the data + return data + } + keyOffset += startOffset + _, _, _, subEndOffset, _ := internalGet(data[startOffset:endOffset], keys[lk-1]) + endOffset = startOffset + subEndOffset + tokEnd := tokenEnd(data[endOffset:]) + tokStart := findTokenStart(data[:keyOffset], ","[0]) + + if data[endOffset+tokEnd] == ","[0] { + endOffset += tokEnd + 1 + } else if data[endOffset+tokEnd] == " "[0] && len(data) > endOffset+tokEnd+1 && data[endOffset+tokEnd+1] == ","[0] { + endOffset += tokEnd + 2 + } else if data[endOffset+tokEnd] == "}"[0] && data[tokStart] == ","[0] { + keyOffset = tokStart + } + } else { + _, _, keyOffset, endOffset, err = internalGet(data, keys...) + if err == KeyPathNotFoundError { + // problem parsing the data + return data + } + + tokEnd := tokenEnd(data[endOffset:]) + tokStart := findTokenStart(data[:keyOffset], ","[0]) + + if data[endOffset+tokEnd] == ","[0] { + endOffset += tokEnd + 1 + } else if data[endOffset+tokEnd] == "]"[0] && data[tokStart] == ","[0] { + keyOffset = tokStart + } + } + + // We need to remove remaining trailing comma if we delete las element in the object + prevTok := lastToken(data[:keyOffset]) + remainedValue := data[endOffset:] + + var newOffset int + if nextToken(remainedValue) > -1 && remainedValue[nextToken(remainedValue)] == '}' && data[prevTok] == ',' { + newOffset = prevTok + } else { + newOffset = prevTok + 1 + } + + // We have to make a copy here if we don't want to mangle the original data, because byte slices are + // accessed by reference and not by value + dataCopy := make([]byte, len(data)) + copy(dataCopy, data) + data = append(dataCopy[:newOffset], dataCopy[endOffset:]...) + + return data +} + +/* + +Set - Receives existing data structure, path to set, and data to set at that key. + +Returns: +`value` - modified byte array +`err` - On any parsing error + +*/ +func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error) { + // ensure keys are set + if len(keys) == 0 { + return nil, KeyPathNotFoundError + } + + _, _, startOffset, endOffset, err := internalGet(data, keys...) + if err != nil { + if err != KeyPathNotFoundError { + // problem parsing the data + return nil, err + } + // full path doesnt exist + // does any subpath exist? + var depth int + for i := range keys { + _, _, start, end, sErr := internalGet(data, keys[:i+1]...) 
+ if sErr != nil { + break + } else { + endOffset = end + startOffset = start + depth++ + } + } + comma := true + object := false + if endOffset == -1 { + firstToken := nextToken(data) + // We can't set a top-level key if data isn't an object + if firstToken < 0 || data[firstToken] != '{' { + return nil, KeyPathNotFoundError + } + // Don't need a comma if the input is an empty object + secondToken := firstToken + 1 + nextToken(data[firstToken+1:]) + if data[secondToken] == '}' { + comma = false + } + // Set the top level key at the end (accounting for any trailing whitespace) + // This assumes last token is valid like '}', could check and return error + endOffset = lastToken(data) + } + depthOffset := endOffset + if depth != 0 { + // if subpath is a non-empty object, add to it + // or if subpath is a non-empty array, add to it + if (data[startOffset] == '{' && data[startOffset+1+nextToken(data[startOffset+1:])] != '}') || + (data[startOffset] == '[' && data[startOffset+1+nextToken(data[startOffset+1:])] == '{') && keys[depth:][0][0] == 91 { + depthOffset-- + startOffset = depthOffset + // otherwise, over-write it with a new object + } else { + comma = false + object = true + } + } else { + startOffset = depthOffset + } + value = append(data[:startOffset], append(createInsertComponent(keys[depth:], setValue, comma, object), data[depthOffset:]...)...) + } else { + // path currently exists + startComponent := data[:startOffset] + endComponent := data[endOffset:] + + value = make([]byte, len(startComponent)+len(endComponent)+len(setValue)) + newEndOffset := startOffset + len(setValue) + copy(value[0:startOffset], startComponent) + copy(value[startOffset:newEndOffset], setValue) + copy(value[newEndOffset:], endComponent) + } + return value, nil +} + +func getType(data []byte, offset int) ([]byte, ValueType, int, error) { + var dataType ValueType + endOffset := offset + + // if string value + if data[offset] == '"' { + dataType = String + if idx, _ := stringEnd(data[offset+1:]); idx != -1 { + endOffset += idx + 1 + } else { + return nil, dataType, offset, MalformedStringError + } + } else if data[offset] == '[' { // if array value + dataType = Array + // break label, for stopping nested loops + endOffset = blockEnd(data[offset:], '[', ']') + + if endOffset == -1 { + return nil, dataType, offset, MalformedArrayError + } + + endOffset += offset + } else if data[offset] == '{' { // if object value + dataType = Object + // break label, for stopping nested loops + endOffset = blockEnd(data[offset:], '{', '}') + + if endOffset == -1 { + return nil, dataType, offset, MalformedObjectError + } + + endOffset += offset + } else { + // Number, Boolean or None + end := tokenEnd(data[endOffset:]) + + if end == -1 { + return nil, dataType, offset, MalformedValueError + } + + value := data[offset : endOffset+end] + + switch data[offset] { + case 't', 'f': // true or false + if bytes.Equal(value, trueLiteral) || bytes.Equal(value, falseLiteral) { + dataType = Boolean + } else { + return nil, Unknown, offset, UnknownValueTypeError + } + case 'u', 'n': // undefined or null + if bytes.Equal(value, nullLiteral) { + dataType = Null + } else { + return nil, Unknown, offset, UnknownValueTypeError + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + dataType = Number + default: + return nil, Unknown, offset, UnknownValueTypeError + } + + endOffset += end + } + return data[offset:endOffset], dataType, endOffset, nil +} + +/* +Get - Receives data structure, and key path to extract value from. 
+
+Returns:
+`value` - Pointer into the original data structure containing the key's value, or just an empty slice if nothing is found or an error occurs
+`dataType` - Can be: `NotExist`, `String`, `Number`, `Object`, `Array`, `Boolean` or `Null`
+`offset` - Offset from the provided data structure where the key's value ends. Used mostly internally, for example for the `ArrayEach` helper.
+`err` - If the key is not found or there is any other parsing issue, an error is returned. If the key is not found, `dataType` is also set to `NotExist`
+
+Accepts multiple keys to specify the path to a JSON value (in case of querying nested structures).
+If no keys are provided it will try to extract the closest JSON value (simple ones or object/array), useful for reading streams or arrays, see the `ArrayEach` implementation.
+*/
+func Get(data []byte, keys ...string) (value []byte, dataType ValueType, offset int, err error) {
+	a, b, _, d, e := internalGet(data, keys...)
+	return a, b, d, e
+}
+
+func internalGet(data []byte, keys ...string) (value []byte, dataType ValueType, offset, endOffset int, err error) {
+	if len(keys) > 0 {
+		if offset = searchKeys(data, keys...); offset == -1 {
+			return nil, NotExist, -1, -1, KeyPathNotFoundError
+		}
+	}
+
+	// Go to closest value
+	nO := nextToken(data[offset:])
+	if nO == -1 {
+		return nil, NotExist, offset, -1, MalformedJsonError
+	}
+
+	offset += nO
+	value, dataType, endOffset, err = getType(data, offset)
+	if err != nil {
+		return value, dataType, offset, endOffset, err
+	}
+
+	// Strip quotes from string values
+	if dataType == String {
+		value = value[1 : len(value)-1]
+	}
+
+	return value[:len(value):len(value)], dataType, offset, endOffset, nil
+}
+
+// ArrayEach is used when iterating arrays, accepts a callback function with the same return arguments as `Get`.
+func ArrayEach(data []byte, cb func(value []byte, dataType ValueType, offset int, err error), keys ...string) (offset int, err error) {
+	if len(data) == 0 {
+		return -1, MalformedObjectError
+	}
+
+	nT := nextToken(data)
+	if nT == -1 {
+		return -1, MalformedJsonError
+	}
+
+	offset = nT + 1
+
+	if len(keys) > 0 {
+		if offset = searchKeys(data, keys...); offset == -1 {
+			return offset, KeyPathNotFoundError
+		}
+
+		// Go to closest value
+		nO := nextToken(data[offset:])
+		if nO == -1 {
+			return offset, MalformedJsonError
+		}
+
+		offset += nO
+
+		if data[offset] != '[' {
+			return offset, MalformedArrayError
+		}
+
+		offset++
+	}
+
+	nO := nextToken(data[offset:])
+	if nO == -1 {
+		return offset, MalformedJsonError
+	}
+
+	offset += nO
+
+	if data[offset] == ']' {
+		return offset, nil
+	}
+
+	for {
+		v, t, o, e := Get(data[offset:])
+
+		if e != nil {
+			return offset, e
+		}
+
+		if o == 0 {
+			break
+		}
+
+		if t != NotExist {
+			cb(v, t, offset+o-len(v), e)
+		}
+
+		if e != nil {
+			break
+		}
+
+		offset += o
+
+		skipToToken := nextToken(data[offset:])
+		if skipToToken == -1 {
+			return offset, MalformedArrayError
+		}
+		offset += skipToToken
+
+		if data[offset] == ']' {
+			break
+		}
+
+		if data[offset] != ',' {
+			return offset, MalformedArrayError
+		}
+
+		offset++
+	}
+
+	return offset, nil
+}
+
+// ObjectEach iterates over the key-value pairs of a JSON object, invoking a given callback for each such entry
+func ObjectEach(data []byte, callback func(key []byte, value []byte, dataType ValueType, offset int) error, keys ...string) (err error) {
+	offset := 0
+
+	// Descend to the desired key, if requested
+	if len(keys) > 0 {
+		if off := searchKeys(data, keys...); off == -1 {
+			return KeyPathNotFoundError
+		} else {
+			offset = off
+		}
+	}
+
+	// Validate and skip past opening brace
+
if off := nextToken(data[offset:]); off == -1 { + return MalformedObjectError + } else if offset += off; data[offset] != '{' { + return MalformedObjectError + } else { + offset++ + } + + // Skip to the first token inside the object, or stop if we find the ending brace + if off := nextToken(data[offset:]); off == -1 { + return MalformedJsonError + } else if offset += off; data[offset] == '}' { + return nil + } + + // Loop pre-condition: data[offset] points to what should be either the next entry's key, or the closing brace (if it's anything else, the JSON is malformed) + for offset < len(data) { + // Step 1: find the next key + var key []byte + + // Check what the the next token is: start of string, end of object, or something else (error) + switch data[offset] { + case '"': + offset++ // accept as string and skip opening quote + case '}': + return nil // we found the end of the object; stop and return success + default: + return MalformedObjectError + } + + // Find the end of the key string + var keyEscaped bool + if off, esc := stringEnd(data[offset:]); off == -1 { + return MalformedJsonError + } else { + key, keyEscaped = data[offset:offset+off-1], esc + offset += off + } + + // Unescape the string if needed + if keyEscaped { + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + if keyUnescaped, err := Unescape(key, stackbuf[:]); err != nil { + return MalformedStringEscapeError + } else { + key = keyUnescaped + } + } + + // Step 2: skip the colon + if off := nextToken(data[offset:]); off == -1 { + return MalformedJsonError + } else if offset += off; data[offset] != ':' { + return MalformedJsonError + } else { + offset++ + } + + // Step 3: find the associated value, then invoke the callback + if value, valueType, off, err := Get(data[offset:]); err != nil { + return err + } else if err := callback(key, value, valueType, offset+off); err != nil { // Invoke the callback here! + return err + } else { + offset += off + } + + // Step 4: skip over the next comma to the following token, or stop if we hit the ending brace + if off := nextToken(data[offset:]); off == -1 { + return MalformedArrayError + } else { + offset += off + switch data[offset] { + case '}': + return nil // Stop if we hit the close brace + case ',': + offset++ // Ignore the comma + default: + return MalformedObjectError + } + } + + // Skip to the next token after the comma + if off := nextToken(data[offset:]); off == -1 { + return MalformedArrayError + } else { + offset += off + } + } + + return MalformedObjectError // we shouldn't get here; it's expected that we will return via finding the ending brace +} + +// GetUnsafeString returns the value retrieved by `Get`, use creates string without memory allocation by mapping string to slice memory. It does not handle escape symbols. +func GetUnsafeString(data []byte, keys ...string) (val string, err error) { + v, _, _, e := Get(data, keys...) + + if e != nil { + return "", e + } + + return bytesToString(&v), nil +} + +// GetString returns the value retrieved by `Get`, cast to a string if possible, trying to properly handle escape and utf8 symbols +// If key data type do not match, it will return an error. +func GetString(data []byte, keys ...string) (val string, err error) { + v, t, _, e := Get(data, keys...) 
+ + if e != nil { + return "", e + } + + if t != String { + return "", fmt.Errorf("Value is not a string: %s", string(v)) + } + + // If no escapes return raw content + if bytes.IndexByte(v, '\\') == -1 { + return string(v), nil + } + + return ParseString(v) +} + +// GetFloat returns the value retrieved by `Get`, cast to a float64 if possible. +// The offset is the same as in `Get`. +// If key data type do not match, it will return an error. +func GetFloat(data []byte, keys ...string) (val float64, err error) { + v, t, _, e := Get(data, keys...) + + if e != nil { + return 0, e + } + + if t != Number { + return 0, fmt.Errorf("Value is not a number: %s", string(v)) + } + + return ParseFloat(v) +} + +// GetInt returns the value retrieved by `Get`, cast to a int64 if possible. +// If key data type do not match, it will return an error. +func GetInt(data []byte, keys ...string) (val int64, err error) { + v, t, _, e := Get(data, keys...) + + if e != nil { + return 0, e + } + + if t != Number { + return 0, fmt.Errorf("Value is not a number: %s", string(v)) + } + + return ParseInt(v) +} + +// GetBoolean returns the value retrieved by `Get`, cast to a bool if possible. +// The offset is the same as in `Get`. +// If key data type do not match, it will return error. +func GetBoolean(data []byte, keys ...string) (val bool, err error) { + v, t, _, e := Get(data, keys...) + + if e != nil { + return false, e + } + + if t != Boolean { + return false, fmt.Errorf("Value is not a boolean: %s", string(v)) + } + + return ParseBoolean(v) +} + +// ParseBoolean parses a Boolean ValueType into a Go bool (not particularly useful, but here for completeness) +func ParseBoolean(b []byte) (bool, error) { + switch { + case bytes.Equal(b, trueLiteral): + return true, nil + case bytes.Equal(b, falseLiteral): + return false, nil + default: + return false, MalformedValueError + } +} + +// ParseString parses a String ValueType into a Go string (the main parsing work is unescaping the JSON string) +func ParseString(b []byte) (string, error) { + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + if bU, err := Unescape(b, stackbuf[:]); err != nil { + return "", MalformedValueError + } else { + return string(bU), nil + } +} + +// ParseNumber parses a Number ValueType into a Go float64 +func ParseFloat(b []byte) (float64, error) { + if v, err := parseFloat(&b); err != nil { + return 0, MalformedValueError + } else { + return v, nil + } +} + +// ParseInt parses a Number ValueType into a Go int64 +func ParseInt(b []byte) (int64, error) { + if v, ok, overflow := parseInt(b); !ok { + if overflow { + return 0, OverflowIntegerError + } + return 0, MalformedValueError + } else { + return v, nil + } +} diff --git a/vendor/github.com/c2h5oh/datasize/README.md b/vendor/github.com/c2h5oh/datasize/README.md index ac0cf858..f6e82858 100644 --- a/vendor/github.com/c2h5oh/datasize/README.md +++ b/vendor/github.com/c2h5oh/datasize/README.md @@ -19,7 +19,7 @@ Just like `time` package provides `time.Second`, `time.Day` constants `datasize` Just like `time` package provides `duration.Nanoseconds() uint64 `, `duration.Hours() float64` helpers `datasize` has. 
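Pausing on the jsonparser API vendored above: the relationship between `Get`, the typed getters, and the two iterators is easiest to see from the calling side. A minimal sketch; the signatures are taken from the vendored source above, but the import path is an assumption (the API matches github.com/buger/jsonparser) and the sample document is purely illustrative:

```go
package main

import (
	"fmt"

	// Assumed import path; the vendored file above defines Get, GetInt,
	// ArrayEach and ObjectEach with exactly these signatures.
	"github.com/buger/jsonparser"
)

func main() {
	data := []byte(`{"person":{"name":"Ann","age":37},"tags":["a","b"]}`)

	// Get walks a key path and returns the raw value plus its ValueType.
	value, dataType, offset, err := jsonparser.Get(data, "person", "name")
	fmt.Println(string(value), dataType, offset, err)

	// Typed getters wrap Get and validate the ValueType before parsing.
	age, _ := jsonparser.GetInt(data, "person", "age")
	fmt.Println(age) // 37

	// ArrayEach invokes the callback once per element of the array.
	_, _ = jsonparser.ArrayEach(data, func(v []byte, t jsonparser.ValueType, o int, e error) {
		fmt.Println("tag:", string(v))
	}, "tags")

	// ObjectEach iterates key/value pairs; returning an error stops iteration.
	_ = jsonparser.ObjectEach(data, func(k, v []byte, t jsonparser.ValueType, o int) error {
		fmt.Printf("%s = %s\n", k, v)
		return nil
	}, "person")
}
```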
* `ByteSize.Bytes() uint64` -* `ByteSize.Kilobytes() float4` +* `ByteSize.Kilobytes() float64` * `ByteSize.Megabytes() float64` * `ByteSize.Gigabytes() float64` * `ByteSize.Terabytes() float64` diff --git a/vendor/github.com/c2h5oh/datasize/datasize.go b/vendor/github.com/c2h5oh/datasize/datasize.go index 67547881..2ce76275 100644 --- a/vendor/github.com/c2h5oh/datasize/datasize.go +++ b/vendor/github.com/c2h5oh/datasize/datasize.go @@ -215,3 +215,25 @@ BitsError: *b = 0 return &strconv.NumError{fnUnmarshalText, string(t0), ErrBits} } + +func Parse(t []byte) (ByteSize, error) { + var v ByteSize + err := v.UnmarshalText(t) + return v, err +} + +func MustParse(t []byte) ByteSize { + v, err := Parse(t) + if err != nil { + panic(err) + } + return v +} + +func ParseString(s string) (ByteSize, error) { + return Parse([]byte(s)) +} + +func MustParseString(s string) ByteSize { + return MustParse([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/LICENSE.txt b/vendor/github.com/cespare/xxhash/LICENSE.txt new file mode 100644 index 00000000..24b53065 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md new file mode 100644 index 00000000..0982fd25 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/README.md @@ -0,0 +1,50 @@ +# xxhash + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +The API is very small, taking its cue from the other hashing packages in the +standard library: + + $ go doc github.com/cespare/xxhash ! + package xxhash // import "github.com/cespare/xxhash" + + Package xxhash implements the 64-bit variant of xxHash (XXH64) as described + at http://cyan4973.github.io/xxHash/. + + func New() hash.Hash64 + func Sum64(b []byte) uint64 + func Sum64String(s string) uint64 + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. 
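The `Parse`/`MustParse` helpers just added to datasize wrap the existing `UnmarshalText`, so converting a human-readable size string into a `ByteSize` becomes a one-liner. A small sketch, using only the helpers added above and the accessor methods listed in the README (the input literals are illustrative):

```go
package main

import (
	"fmt"

	"github.com/c2h5oh/datasize"
)

func main() {
	// ParseString wraps Parse([]byte(s)) and reports malformed input as an error.
	v, err := datasize.ParseString("10MB")
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Bytes())     // total size as uint64
	fmt.Println(v.Megabytes()) // 10

	// MustParseString panics on malformed input, handy for fixed values in tests.
	quota := datasize.MustParseString("1GB")
	fmt.Println(quota.Gigabytes()) // 1
}
```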
+ +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64 against another popular Go XXH64 implementation, +[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash): + +| input size | OneOfOne | cespare (purego) | cespare | +| --- | --- | --- | --- | +| 5 B | 416 MB/s | 720 MB/s | 872 MB/s | +| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s | +| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s | +| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s | + +These numbers were generated with: + +``` +$ go test -benchtime 10s -bench '/OneOfOne,' +$ go test -tags purego -benchtime 10s -bench '/xxhash,' +$ go test -benchtime 10s -bench '/xxhash,' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) diff --git a/vendor/github.com/cespare/xxhash/rotate.go b/vendor/github.com/cespare/xxhash/rotate.go new file mode 100644 index 00000000..f3eac5eb --- /dev/null +++ b/vendor/github.com/cespare/xxhash/rotate.go @@ -0,0 +1,14 @@ +// +build !go1.9 + +package xxhash + +// TODO(caleb): After Go 1.10 comes out, remove this fallback code. + +func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) } +func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) } +func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) } +func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) } +func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) } +func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) } +func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) } +func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) } diff --git a/vendor/github.com/cespare/xxhash/rotate19.go b/vendor/github.com/cespare/xxhash/rotate19.go new file mode 100644 index 00000000..b99612ba --- /dev/null +++ b/vendor/github.com/cespare/xxhash/rotate19.go @@ -0,0 +1,14 @@ +// +build go1.9 + +package xxhash + +import "math/bits" + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go new file mode 100644 index 00000000..f896bd28 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash.go @@ -0,0 +1,168 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "hash" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. 
Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +type xxh struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total int + mem [32]byte + n int // how much of mem is used +} + +// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm. +func New() hash.Hash64 { + var x xxh + x.Reset() + return &x +} + +func (x *xxh) Reset() { + x.n = 0 + x.total = 0 + x.v1 = prime1v + prime2 + x.v2 = prime2 + x.v3 = 0 + x.v4 = -prime1v +} + +func (x *xxh) Size() int { return 8 } +func (x *xxh) BlockSize() int { return 32 } + +// Write adds more data to x. It always returns len(b), nil. +func (x *xxh) Write(b []byte) (n int, err error) { + n = len(b) + x.total += len(b) + + if x.n+len(b) < 32 { + // This new data doesn't even fill the current block. + copy(x.mem[x.n:], b) + x.n += len(b) + return + } + + if x.n > 0 { + // Finish off the partial block. + copy(x.mem[x.n:], b) + x.v1 = round(x.v1, u64(x.mem[0:8])) + x.v2 = round(x.v2, u64(x.mem[8:16])) + x.v3 = round(x.v3, u64(x.mem[16:24])) + x.v4 = round(x.v4, u64(x.mem[24:32])) + b = b[32-x.n:] + x.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + b = writeBlocks(x, b) + } + + // Store any remaining partial block. + copy(x.mem[:], b) + x.n = len(b) + + return +} + +func (x *xxh) Sum(b []byte) []byte { + s := x.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +func (x *xxh) Sum64() uint64 { + var h uint64 + + if x.total >= 32 { + v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = x.v3 + prime5 + } + + h += uint64(x.total) + + i, end := 0, x.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(x.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(x.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(x.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/xxhash_amd64.go new file mode 100644 index 00000000..d6176526 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash_amd64.go @@ -0,0 +1,12 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +func writeBlocks(x *xxh, b []byte) []byte diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/xxhash_amd64.s new file mode 100644 index 00000000..757f2011 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash_amd64.s @@ -0,0 +1,233 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the x pointer. + +// func writeBlocks(x *xxh, b []byte) []byte +TEXT ·writeBlocks(SB), NOSPLIT, $0-56 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. 
+ MOVQ b_base+8(FP), CX + MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below + MOVQ b_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from x. + MOVQ x+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to x. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // Construct return slice. + // NOTE: It's important that we don't construct a slice that has a base + // pointer off the end of the original slice, as in Go 1.7+ this will + // cause runtime crashes. (See discussion in, for example, + // https://github.com/golang/go/issues/16772.) + // Therefore, we calculate the length/cap first, and if they're zero, we + // keep the old base. This is what the compiler does as well if you + // write code like + // b = b[len(b):] + + // New length is 32 - (CX - BX) -> BX+32 - CX. + ADDQ $32, BX + SUBQ CX, BX + JZ afterSetBase + + MOVQ CX, ret_base+32(FP) + +afterSetBase: + MOVQ BX, ret_len+40(FP) + MOVQ BX, ret_cap+48(FP) // set cap == len + + RET diff --git a/vendor/github.com/cespare/xxhash/xxhash_other.go b/vendor/github.com/cespare/xxhash/xxhash_other.go new file mode 100644 index 00000000..c68d13f8 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash_other.go @@ -0,0 +1,75 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // x := New() + // x.Write(b) + // return x.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(x *xxh, b []byte) []byte { + v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4 + return b +} diff --git a/vendor/github.com/cespare/xxhash/xxhash_safe.go b/vendor/github.com/cespare/xxhash/xxhash_safe.go new file mode 100644 index 00000000..dfa15ab7 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash_safe.go @@ -0,0 +1,10 @@ +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. 
+func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go new file mode 100644 index 00000000..d2b64e8b --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go @@ -0,0 +1,30 @@ +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "reflect" + "unsafe" +) + +// Sum64String computes the 64-bit xxHash digest of s. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +// +// TODO(caleb): Consider removing this if an optimization is ever added to make +// it unnecessary: https://golang.org/issue/2205. +// +// TODO(caleb): We still have a function call; we could instead write Go/asm +// copies of Sum64 for strings to squeeze out a bit more speed. +func Sum64String(s string) uint64 { + // See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ + // for some discussion about this unsafe conversion. + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return Sum64(b) +} diff --git a/vendor/github.com/dgryski/go-rendezvous/LICENSE b/vendor/github.com/dgryski/go-rendezvous/LICENSE new file mode 100644 index 00000000..22080f73 --- /dev/null +++ b/vendor/github.com/dgryski/go-rendezvous/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017-2020 Damian Gryski + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
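That completes the vendored xxhash package; its public surface is just the three functions named in its README. A quick sketch of the one-shot and streaming forms:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash"
)

func main() {
	// One-shot hashing of a byte slice.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello")))

	// Sum64String avoids the []byte(s) copy on non-appengine builds.
	fmt.Printf("%016x\n", xxhash.Sum64String("hello"))

	// Streaming: New returns a hash.Hash64, so it composes with io.Writer
	// APIs. Write always returns len(b), nil, so the error can be ignored.
	h := xxhash.New()
	_, _ = h.Write([]byte("hel"))
	_, _ = h.Write([]byte("lo"))
	fmt.Printf("%016x\n", h.Sum64()) // equal to the one-shot digests above
}
```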
diff --git a/vendor/github.com/dgryski/go-rendezvous/rdv.go b/vendor/github.com/dgryski/go-rendezvous/rdv.go
new file mode 100644
index 00000000..7a6f8203
--- /dev/null
+++ b/vendor/github.com/dgryski/go-rendezvous/rdv.go
@@ -0,0 +1,82 @@
+package rendezvous
+
+type Rendezvous struct {
+	nodes map[string]int
+	nstr  []string
+	nhash []uint64
+	hash  Hasher
+}
+
+type Hasher func(s string) uint64
+
+func New(nodes []string, hash Hasher) *Rendezvous {
+	r := &Rendezvous{
+		nodes: make(map[string]int, len(nodes)),
+		nstr:  make([]string, len(nodes)),
+		nhash: make([]uint64, len(nodes)),
+		hash:  hash,
+	}
+
+	for i, n := range nodes {
+		r.nodes[n] = i
+		r.nstr[i] = n
+		r.nhash[i] = hash(n)
+	}
+
+	return r
+}
+
+func (r *Rendezvous) Lookup(k string) string {
+	// short-circuit if we're empty
+	if len(r.nodes) == 0 {
+		return ""
+	}
+
+	khash := r.hash(k)
+
+	var midx int
+	var mhash = xorshiftMult64(khash ^ r.nhash[0])
+
+	for i, nhash := range r.nhash[1:] {
+		if h := xorshiftMult64(khash ^ nhash); h > mhash {
+			midx = i + 1
+			mhash = h
+		}
+	}
+
+	return r.nstr[midx]
+}
+
+func (r *Rendezvous) Add(node string) {
+	r.nodes[node] = len(r.nstr)
+	r.nstr = append(r.nstr, node)
+	r.nhash = append(r.nhash, r.hash(node))
+}
+
+func (r *Rendezvous) Remove(node string) {
+	// find index of node to remove
+	nidx := r.nodes[node]
+
+	// swap-remove from the slices (the last valid index is len-1)
+	l := len(r.nstr) - 1
+	r.nstr[nidx] = r.nstr[l]
+	r.nstr = r.nstr[:l]
+
+	r.nhash[nidx] = r.nhash[l]
+	r.nhash = r.nhash[:l]
+
+	// update the map, re-indexing the element swapped into the removed
+	// slot (skipped when the removed element was itself the last one)
+	delete(r.nodes, node)
+	if nidx < len(r.nstr) {
+		moved := r.nstr[nidx]
+		r.nodes[moved] = nidx
+	}
+}
+
+func xorshiftMult64(x uint64) uint64 {
+	x ^= x >> 12 // a
+	x ^= x << 25 // b
+	x ^= x >> 27 // c
+	return x * 2685821657736338717
+}
diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
index 02a73ccf..352018e7 100644
--- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
+++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
@@ -1,5 +1,10 @@
 # Change history of go-restful
 
+## [v3.10.2] - 2023-03-09
+
+- introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0
+  see comment in Readme how to customize this behaviour.
+
 ## [v3.10.1] - 2022-11-19
 
 - fix broken 3.10.0 by using path package for joining paths
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md
index 0625359d..85da9012 100644
--- a/vendor/github.com/emicklei/go-restful/v3/README.md
+++ b/vendor/github.com/emicklei/go-restful/v3/README.md
@@ -96,6 +96,10 @@ There are several hooks to customize the behavior of the go-restful package.
 - Compression
 - Encoders for other serializers
 - Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
+- Use the variable `MergePathStrategy` to change the behaviour of composing the Route path given a root path and a local route path
+  - versions >= 3.10.1 have set the value to `PathJoinStrategy`, which fixes a reported [security issue](https://github.com/advisories/GHSA-r48q-9g5r-8q2h) but may cause your services to no longer work correctly.
+  - versions <= 3.9 had the old behaviour, which can be restored in newer versions by setting the value to `TrimSlashStrategy`.
+  - you can set the value to a custom implementation (it must match the `MergePathStrategyFunc` signature)
 
 ## Resources
 
diff --git a/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go
index 830ebf14..827f471d 100644
--- a/vendor/github.com/emicklei/go-restful/v3/route_builder.go
+++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go
@@ -353,8 +353,28 @@ func (b *RouteBuilder) Build() Route {
 	return route
 }
 
-func concatPath(path1, path2 string) string {
-	return path.Join(path1, path2)
+type MergePathStrategyFunc func(rootPath, routePath string) string
+
+var (
+	// behavior >= 3.10
+	PathJoinStrategy = func(rootPath, routePath string) string {
+		return path.Join(rootPath, routePath)
+	}
+
+	// behavior <= 3.9
+	TrimSlashStrategy = func(rootPath, routePath string) string {
+		return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/")
+	}
+
+	// MergePathStrategy is the active strategy for merging a Route path when building the routing of all WebServices.
+	// The value is set to PathJoinStrategy.
+	// PathJoinStrategy is a strategy that is more strict [Security - PRISMA-2022-0227]
+	MergePathStrategy = PathJoinStrategy
+)
+
+// concatPath merges two paths using the current (package global) merge path strategy.
+func concatPath(rootPath, routePath string) string {
+	return MergePathStrategy(rootPath, routePath)
 }
 
 var anonymousFuncCount int32
diff --git a/vendor/github.com/facette/natsort/LICENSE b/vendor/github.com/facette/natsort/LICENSE
new file mode 100644
index 00000000..cc8f232b
--- /dev/null
+++ b/vendor/github.com/facette/natsort/LICENSE
@@ -0,0 +1,29 @@
+Copyright (c) 2015, Vincent Batoufflet and Marc Falzon
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+   * Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+   * Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+
+   * Neither the name of the authors nor the names of its contributors
+     may be used to endorse or promote products derived from this software
+     without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/facette/natsort/README.md b/vendor/github.com/facette/natsort/README.md
new file mode 100644
index 00000000..a4362b72
--- /dev/null
+++ b/vendor/github.com/facette/natsort/README.md
@@ -0,0 +1,104 @@
+# natsort: natural strings sorting in Go
+
+This is an implementation of the "Alphanum Algorithm" by [Dave Koelle][0] in Go.
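Before moving on to natsort: the `MergePathStrategy` switch added to route_builder.go above is described in the README mostly in prose, so here is the same thing as caller code. A sketch, assuming the package is imported under its usual `restful` name; only `MergePathStrategy`, `TrimSlashStrategy`, and the `MergePathStrategyFunc` shape are taken from the diff:

```go
package main

import (
	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	// Restore the <= 3.9 concatenation behaviour. This must happen before
	// any WebService routes are built, since Build() reads the variable.
	restful.MergePathStrategy = restful.TrimSlashStrategy

	// Alternatively, any function matching MergePathStrategyFunc works
	// (illustrative only; it performs no normalization):
	// restful.MergePathStrategy = func(rootPath, routePath string) string {
	// 	return rootPath + "/" + routePath
	// }
}
```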
+ +[![GoDoc](https://godoc.org/facette.io/natsort?status.svg)](https://godoc.org/facette.io/natsort) + +## Usage + +```go +package main + +import ( + "fmt" + "strings" + + "facette.io/natsort" +) + +func main() { + list := []string{ + "1000X Radonius Maximus", + "10X Radonius", + "200X Radonius", + "20X Radonius", + "20X Radonius Prime", + "30X Radonius", + "40X Radonius", + "Allegia 50 Clasteron", + "Allegia 500 Clasteron", + "Allegia 50B Clasteron", + "Allegia 51 Clasteron", + "Allegia 6R Clasteron", + "Alpha 100", + "Alpha 2", + "Alpha 200", + "Alpha 2A", + "Alpha 2A-8000", + "Alpha 2A-900", + "Callisto Morphamax", + "Callisto Morphamax 500", + "Callisto Morphamax 5000", + "Callisto Morphamax 600", + "Callisto Morphamax 6000 SE", + "Callisto Morphamax 6000 SE2", + "Callisto Morphamax 700", + "Callisto Morphamax 7000", + "Xiph Xlater 10000", + "Xiph Xlater 2000", + "Xiph Xlater 300", + "Xiph Xlater 40", + "Xiph Xlater 5", + "Xiph Xlater 50", + "Xiph Xlater 500", + "Xiph Xlater 5000", + "Xiph Xlater 58", + } + + natsort.Sort(list) + + fmt.Println(strings.Join(list, "\n")) +} +``` + +Output: + +``` +10X Radonius +20X Radonius +20X Radonius Prime +30X Radonius +40X Radonius +200X Radonius +1000X Radonius Maximus +Allegia 6R Clasteron +Allegia 50 Clasteron +Allegia 50B Clasteron +Allegia 51 Clasteron +Allegia 500 Clasteron +Alpha 2 +Alpha 2A +Alpha 2A-900 +Alpha 2A-8000 +Alpha 100 +Alpha 200 +Callisto Morphamax +Callisto Morphamax 500 +Callisto Morphamax 600 +Callisto Morphamax 700 +Callisto Morphamax 5000 +Callisto Morphamax 6000 SE +Callisto Morphamax 6000 SE2 +Callisto Morphamax 7000 +Xiph Xlater 5 +Xiph Xlater 40 +Xiph Xlater 50 +Xiph Xlater 58 +Xiph Xlater 300 +Xiph Xlater 500 +Xiph Xlater 2000 +Xiph Xlater 5000 +Xiph Xlater 10000 +``` + +[0]: http://davekoelle.com/alphanum.html diff --git a/vendor/github.com/facette/natsort/natsort.go b/vendor/github.com/facette/natsort/natsort.go new file mode 100644 index 00000000..5c3c28d5 --- /dev/null +++ b/vendor/github.com/facette/natsort/natsort.go @@ -0,0 +1,85 @@ +// Package natsort implements natural strings sorting +package natsort + +import ( + "regexp" + "sort" + "strconv" +) + +type stringSlice []string + +func (s stringSlice) Len() int { + return len(s) +} + +func (s stringSlice) Less(a, b int) bool { + return Compare(s[a], s[b]) +} + +func (s stringSlice) Swap(a, b int) { + s[a], s[b] = s[b], s[a] +} + +var chunkifyRegexp = regexp.MustCompile(`(\d+|\D+)`) + +func chunkify(s string) []string { + return chunkifyRegexp.FindAllString(s, -1) +} + +// Sort sorts a list of strings in a natural order +func Sort(l []string) { + sort.Sort(stringSlice(l)) +} + +// Compare returns true if the first string precedes the second one according to natural order +func Compare(a, b string) bool { + chunksA := chunkify(a) + chunksB := chunkify(b) + + nChunksA := len(chunksA) + nChunksB := len(chunksB) + + for i := range chunksA { + if i >= nChunksB { + return false + } + + aInt, aErr := strconv.Atoi(chunksA[i]) + bInt, bErr := strconv.Atoi(chunksB[i]) + + // If both chunks are numeric, compare them as integers + if aErr == nil && bErr == nil { + if aInt == bInt { + if i == nChunksA-1 { + // We reached the last chunk of A, thus B is greater than A + return true + } else if i == nChunksB-1 { + // We reached the last chunk of B, thus A is greater than B + return false + } + + continue + } + + return aInt < bInt + } + + // So far both strings are equal, continue to next chunk + if chunksA[i] == chunksB[i] { + if i == nChunksA-1 { + // We reached the last chunk 
of A, thus B is greater than A + return true + } else if i == nChunksB-1 { + // We reached the last chunk of B, thus A is greater than B + return false + } + + continue + } + + return chunksA[i] < chunksB[i] + } + + return false +} diff --git a/vendor/github.com/fatih/color/color_windows.go b/vendor/github.com/fatih/color/color_windows.go new file mode 100644 index 00000000..be01c558 --- /dev/null +++ b/vendor/github.com/fatih/color/color_windows.go @@ -0,0 +1,19 @@ +package color + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func init() { + // Opt-in for ansi color support for current process. + // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences + var outMode uint32 + out := windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(out, &outMode); err != nil { + return + } + outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + _ = windows.SetConsoleMode(out, outMode) +} diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml index 94ff801d..0cffafa7 100644 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -6,7 +6,6 @@ linters: disable-all: true enable: - asciicheck - - deadcode - errcheck - forcetypeassert - gocritic @@ -18,10 +17,8 @@ linters: - misspell - revive - staticcheck - - structcheck - typecheck - unused - - varcheck issues: exclude-use-default: false diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go index 9d92a38f..99fe8be9 100644 --- a/vendor/github.com/go-logr/logr/discard.go +++ b/vendor/github.com/go-logr/logr/discard.go @@ -20,35 +20,5 @@ package logr // used whenever the caller is not interested in the logs. Logger instances // produced by this function always compare as equal. func Discard() Logger { - return Logger{ - level: 0, - sink: discardLogSink{}, - } -} - -// discardLogSink is a LogSink that discards all messages. -type discardLogSink struct{} - -// Verify that it actually implements the interface -var _ LogSink = discardLogSink{} - -func (l discardLogSink) Init(RuntimeInfo) { -} - -func (l discardLogSink) Enabled(int) bool { - return false -} - -func (l discardLogSink) Info(int, string, ...interface{}) { -} - -func (l discardLogSink) Error(error, string, ...interface{}) { -} - -func (l discardLogSink) WithValues(...interface{}) LogSink { - return l -} - -func (l discardLogSink) WithName(string) LogSink { - return l + return New(nil) } diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index 7accdb0c..e52f0cd0 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -21,13 +21,13 @@ limitations under the License. // github.com/go-logr/logr.LogSink with output through an arbitrary // "write" function. See New and NewJSON for details. // -// Custom LogSinks +// # Custom LogSinks // // For users who need more control, a funcr.Formatter can be embedded inside // your own custom LogSink implementation. This is useful when the LogSink // needs to implement additional methods, for example. // -// Formatting +// # Formatting // // This will respect logr.Marshaler, fmt.Stringer, and error interfaces for // values which are being logged. 
When rendering a struct, funcr will use Go's @@ -37,6 +37,7 @@ package funcr import ( "bytes" "encoding" + "encoding/json" "fmt" "path/filepath" "reflect" @@ -217,7 +218,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { prefix: "", values: nil, depth: 0, - opts: opts, + opts: &opts, } return f } @@ -231,7 +232,7 @@ type Formatter struct { values []interface{} valuesStr string depth int - opts Options + opts *Options } // outputFormat indicates which outputFormat to use. @@ -447,6 +448,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s if flags&flagRawStruct == 0 { buf.WriteByte('{') } + printComma := false // testing i>0 is not enough because of JSON omitted fields for i := 0; i < t.NumField(); i++ { fld := t.Field(i) if fld.PkgPath != "" { @@ -478,9 +480,10 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s if omitempty && isEmpty(v.Field(i)) { continue } - if i > 0 { + if printComma { buf.WriteByte(',') } + printComma = true // if we got here, we are rendering a field if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) continue @@ -500,6 +503,20 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s } return buf.String() case reflect.Slice, reflect.Array: + // If this is outputing as JSON make sure this isn't really a json.RawMessage. + // If so just emit "as-is" and don't pretty it as that will just print + // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want. + if f.outputFormat == outputJSON { + if rm, ok := value.(json.RawMessage); ok { + // If it's empty make sure we emit an empty value as the array style would below. + if len(rm) > 0 { + buf.Write(rm) + } else { + buf.WriteString("null") + } + return buf.String() + } + } buf.WriteByte('[') for i := 0; i < v.Len(); i++ { if i > 0 { diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index c3b56b3d..e027aea3 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -21,7 +21,7 @@ limitations under the License. // to back that API. Packages in the Go ecosystem can depend on this package, // while callers can implement logging with whatever backend is appropriate. // -// Usage +// # Usage // // Logging is done using a Logger instance. Logger is a concrete type with // methods, which defers the actual logging to a LogSink interface. The main @@ -30,16 +30,20 @@ limitations under the License. // "structured logging". // // With Go's standard log package, we might write: -// log.Printf("setting target value %s", targetValue) +// +// log.Printf("setting target value %s", targetValue) // // With logr's structured logging, we'd write: -// logger.Info("setting target", "value", targetValue) +// +// logger.Info("setting target", "value", targetValue) // // Errors are much the same. Instead of: -// log.Printf("failed to open the pod bay door for user %s: %v", user, err) +// +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) // // We'd write: -// logger.Error(err, "failed to open the pod bay door", "user", user) +// +// logger.Error(err, "failed to open the pod bay door", "user", user) // // Info() and Error() are very similar, but they are separate methods so that // LogSink implementations can choose to do things like attach additional @@ -47,7 +51,7 @@ limitations under the License. 
// always logged, regardless of the current verbosity. If there is no error // instance available, passing nil is valid. // -// Verbosity +// # Verbosity // // Often we want to log information only when the application in "verbose // mode". To write log lines that are more verbose, Logger has a V() method. @@ -58,20 +62,22 @@ limitations under the License. // Error messages do not have a verbosity level and are always logged. // // Where we might have written: -// if flVerbose >= 2 { -// log.Printf("an unusual thing happened") -// } +// +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } // // We can write: -// logger.V(2).Info("an unusual thing happened") // -// Logger Names +// logger.V(2).Info("an unusual thing happened") +// +// # Logger Names // // Logger instances can have name strings so that all messages logged through // that instance have additional context. For example, you might want to add // a subsystem name: // -// logger.WithName("compactor").Info("started", "time", time.Now()) +// logger.WithName("compactor").Info("started", "time", time.Now()) // // The WithName() method returns a new Logger, which can be passed to // constructors or other functions for further use. Repeated use of WithName() @@ -82,25 +88,27 @@ limitations under the License. // joining operation (e.g. whitespace, commas, periods, slashes, brackets, // quotes, etc). // -// Saved Values +// # Saved Values // // Logger instances can store any number of key/value pairs, which will be // logged alongside all messages logged through that instance. For example, // you might want to create a Logger instance per managed object: // // With the standard log package, we might write: -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) +// +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) // // With logr we'd write: -// // Elsewhere: set up the logger to log the object name. -// obj.logger = mainLogger.WithValues( -// "name", obj.name, "namespace", obj.namespace) // -// // later on... -// obj.logger.Info("setting foo", "value", targetValue) +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) +// +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) // -// Best Practices +// # Best Practices // // Logger has very few hard rules, with the goal that LogSink implementations // might have a lot of freedom to differentiate. There are, however, some @@ -124,15 +132,15 @@ limitations under the License. // around. For cases where passing a logger is optional, a pointer to Logger // should be used. 
// -// Key Naming Conventions +// # Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but // it is recommended that they: -// * be human-readable and meaningful (not auto-generated or simple ordinals) -// * be constant (not dependent on input data) -// * contain only printable characters -// * not contain whitespace or punctuation -// * use lower case for simple keys and lowerCamelCase for more complex ones +// - be human-readable and meaningful (not auto-generated or simple ordinals) +// - be constant (not dependent on input data) +// - contain only printable characters +// - not contain whitespace or punctuation +// - use lower case for simple keys and lowerCamelCase for more complex ones // // These guidelines help ensure that log data is processed properly regardless // of the log implementation. For example, log implementations will try to @@ -141,51 +149,54 @@ limitations under the License. // While users are generally free to use key names of their choice, it's // generally best to avoid using the following keys, as they're frequently used // by implementations: -// * "caller": the calling information (file/line) of a particular log line -// * "error": the underlying error value in the `Error` method -// * "level": the log level -// * "logger": the name of the associated logger -// * "msg": the log message -// * "stacktrace": the stack trace associated with a particular log line or -// error (often from the `Error` message) -// * "ts": the timestamp for a log line +// - "caller": the calling information (file/line) of a particular log line +// - "error": the underlying error value in the `Error` method +// - "level": the log level +// - "logger": the name of the associated logger +// - "msg": the log message +// - "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// - "ts": the timestamp for a log line // // Implementations are encouraged to make use of these keys to represent the // above concepts, when necessary (for example, in a pure-JSON output form, it // would be necessary to represent at least message and timestamp as ordinary // named values). // -// Break Glass +// # Break Glass // // Implementations may choose to give callers access to the underlying // logging implementation. The recommended pattern for this is: -// // Underlier exposes access to the underlying logging implementation. -// // Since callers only have a logr.Logger, they have to know which -// // implementation is in use, so this interface is less of an abstraction -// // and more of way to test type conversion. -// type Underlier interface { -// GetUnderlying() -// } +// +// // Underlier exposes access to the underlying logging implementation. +// // Since callers only have a logr.Logger, they have to know which +// // implementation is in use, so this interface is less of an abstraction +// // and more of way to test type conversion. +// type Underlier interface { +// GetUnderlying() +// } // // Logger grants access to the sink to enable type assertions like this: -// func DoSomethingWithImpl(log logr.Logger) { -// if underlier, ok := log.GetSink()(impl.Underlier) { -// implLogger := underlier.GetUnderlying() -// ... -// } -// } +// +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink().(impl.Underlier); ok { +// implLogger := underlier.GetUnderlying() +// ... 
+// } +// } // // Custom `With*` functions can be implemented by copying the complete // Logger struct and replacing the sink in the copy: -// // WithFooBar changes the foobar parameter in the log sink and returns a -// // new logger with that modified sink. It does nothing for loggers where -// // the sink doesn't support that parameter. -// func WithFoobar(log logr.Logger, foobar int) logr.Logger { -// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { -// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) -// } -// return log -// } +// +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } // // Don't use New to construct a new Logger with a LogSink retrieved from an // existing Logger. Source code attribution might not work correctly and @@ -201,11 +212,14 @@ import ( ) // New returns a new Logger instance. This is primarily used by libraries -// implementing LogSink, rather than end users. +// implementing LogSink, rather than end users. Passing a nil sink will create +// a Logger which discards all log lines. func New(sink LogSink) Logger { logger := Logger{} logger.setSink(sink) - sink.Init(runtimeInfo) + if sink != nil { + sink.Init(runtimeInfo) + } return logger } @@ -244,7 +258,7 @@ type Logger struct { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { - return l.sink.Enabled(l.level) + return l.sink != nil && l.sink.Enabled(l.level) } // Info logs a non-error message with the given key/value pairs as context. @@ -254,6 +268,9 @@ func (l Logger) Enabled() bool { // information. The key/value pairs must alternate string keys and arbitrary // values. func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } if l.Enabled() { if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() @@ -273,6 +290,9 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // triggered this log line, if present. The err parameter is optional // and nil may be passed instead of an error instance. func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -284,6 +304,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { // level means a log message is less important. Negative V-levels are treated // as 0. func (l Logger) V(level int) Logger { + if l.sink == nil { + return l + } if level < 0 { level = 0 } @@ -294,6 +317,9 @@ func (l Logger) V(level int) Logger { // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithValues(keysAndValues...)) return l } @@ -304,6 +330,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger { // contain only letters, digits, and hyphens (see the package documentation for // more information). 
func (l Logger) WithName(name string) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithName(name)) return l } @@ -324,6 +353,9 @@ func (l Logger) WithName(name string) Logger { // WithCallDepth(1) because it works with implementions that support the // CallDepthLogSink and/or CallStackHelperLogSink interfaces. func (l Logger) WithCallDepth(depth int) Logger { + if l.sink == nil { + return l + } if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(depth)) } @@ -345,6 +377,9 @@ func (l Logger) WithCallDepth(depth int) Logger { // implementation does not support either of these, the original Logger will be // returned. func (l Logger) WithCallStackHelper() (func(), Logger) { + if l.sink == nil { + return func() {}, l + } var helper func() if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(1)) @@ -357,6 +392,11 @@ func (l Logger) WithCallStackHelper() (func(), Logger) { return helper, l } +// IsZero returns true if this logger is an uninitialized zero value +func (l Logger) IsZero() bool { + return l.sink == nil +} + // contextKey is how we find Loggers in a context.Context. type contextKey struct{} @@ -442,7 +482,7 @@ type LogSink interface { WithName(name string) LogSink } -// CallDepthLogSink represents a Logger that knows how to climb the call stack +// CallDepthLogSink represents a LogSink that knows how to climb the call stack // to identify the original call site and can offset the depth by a specified // number of frames. This is useful for users who have helper functions // between the "real" call site and the actual calls to Logger methods. @@ -467,7 +507,7 @@ type CallDepthLogSink interface { WithCallDepth(depth int) LogSink } -// CallStackHelperLogSink represents a Logger that knows how to climb +// CallStackHelperLogSink represents a LogSink that knows how to climb // the call stack to identify the original call site and can skip // intermediate helper functions if they mark themselves as // helper. Go's testing package uses that approach. diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go index 77f1f92c..c13f3435 100644 --- a/vendor/github.com/go-openapi/errors/api.go +++ b/vendor/github.com/go-openapi/errors/api.go @@ -112,7 +112,7 @@ func flattenComposite(errs *CompositeError) *CompositeError { for _, er := range errs.Errors { switch e := er.(type) { case *CompositeError: - if len(e.Errors) > 0 { + if e != nil && len(e.Errors) > 0 { flat := flattenComposite(e) if len(flat.Errors) > 0 { res = append(res, flat.Errors...) 
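The go-logr changes above are mostly nil-sink guards, and their net effect is easy to miss in the diff: the zero `Logger` and `Discard()` become equivalent and safe to call. A short sketch of the resulting behaviour (only `Discard`, `IsZero`, `Enabled`, and the logging methods from the source are used):

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr"
)

func main() {
	// After this change, Discard is just New(nil): a Logger with no sink.
	discard := logr.Discard()

	// The zero value is now safe too; every method nil-checks the sink.
	var zero logr.Logger

	fmt.Println(discard.Enabled(), zero.Enabled()) // false false
	fmt.Println(zero.IsZero())                     // true

	// None of these panic; the log lines are silently dropped.
	zero.Info("ignored")
	zero.Error(nil, "ignored") // a nil error is explicitly allowed
	zero.V(2).WithName("sub").WithValues("k", "v").Info("ignored")
}
```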
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index 7df9853d..de60dc7d 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -26,6 +26,7 @@ package jsonpointer import ( + "encoding/json" "errors" "fmt" "reflect" @@ -40,6 +41,7 @@ const ( pointerSeparator = `/` invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator + notFound = `Can't find the pointer in the document` ) var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() @@ -48,13 +50,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() // JSONPointable is an interface for structs to implement when they need to customize the // json pointer process type JSONPointable interface { - JSONLookup(string) (interface{}, error) + JSONLookup(string) (any, error) } // JSONSetable is an interface for structs to implement when they need to customize the // json pointer process type JSONSetable interface { - JSONSet(string, interface{}) error + JSONSet(string, any) error } // New creates a new json pointer for the given string @@ -81,9 +83,7 @@ func (p *Pointer) parse(jsonPointerString string) error { err = errors.New(invalidStart) } else { referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - for _, referenceToken := range referenceTokens[1:] { - p.referenceTokens = append(p.referenceTokens, referenceToken) - } + p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...) } } @@ -91,26 +91,26 @@ func (p *Pointer) parse(jsonPointerString string) error { } // Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { +func (p *Pointer) Get(document any) (any, reflect.Kind, error) { return p.get(document, swag.DefaultJSONNameProvider) } // Set uses the pointer to set a value from a JSON document -func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { +func (p *Pointer) Set(document any, value any) (any, error) { return document, p.set(document, value, swag.DefaultJSONNameProvider) } // GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { +func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) { return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) } // SetForToken gets a value for a json pointer token 1 level deep -func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { +func SetForToken(document any, decodedToken string, value any) (any, error) { return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) } -func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() @@ -159,7 +159,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam } -func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { +func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error { rValue := reflect.Indirect(reflect.ValueOf(node)) if ns, ok := 
node.(JSONSetable); ok { // pointer impl @@ -210,7 +210,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw } -func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { if nameProvider == nil { nameProvider = swag.DefaultJSONNameProvider @@ -241,7 +241,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf return node, kind, nil } -func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { +func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { @@ -363,6 +363,127 @@ func (p *Pointer) String() string { return pointerString } +func (p *Pointer) Offset(document string) (int64, error) { + dec := json.NewDecoder(strings.NewReader(document)) + var offset int64 + for _, ttk := range p.DecodedTokens() { + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + offset, err = offsetSingleObject(dec, ttk) + if err != nil { + return 0, err + } + case '[': + offset, err = offsetSingleArray(dec, ttk) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return offset, nil +} + +func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { + for dec.More() { + offset := dec.InputOffset() + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err := drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err := drainSingle(dec); err != nil { + return 0, err + } + } + case string: + if tk == decodedToken { + return offset, nil + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return 0, fmt.Errorf("token reference %q not found", decodedToken) +} + +func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { + idx, err := strconv.Atoi(decodedToken) + if err != nil { + return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err) + } + var i int + for i = 0; i < idx && dec.More(); i++ { + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err := drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err := drainSingle(dec); err != nil { + return 0, err + } + } + } + } + if !dec.More() { + return 0, fmt.Errorf("token reference %q not found", decodedToken) + } + return dec.InputOffset(), nil +} + +// drainSingle drains a single level of object or array. +// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed.
+func drainSingle(dec *json.Decoder) error { + for dec.More() { + tk, err := dec.Token() + if err != nil { + return err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err := drainSingle(dec); err != nil { + return err + } + case '[': + if err := drainSingle(dec); err != nil { + return err + } + } + } + } + // Consumes the ending delim + if _, err := dec.Token(); err != nil { + return err + } + return nil +} + // Specific JSON pointer encoding here // ~0 => ~ // ~1 => / diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go index c458b49b..582f0fd4 100644 --- a/vendor/github.com/go-openapi/spec/info.go +++ b/vendor/github.com/go-openapi/spec/info.go @@ -16,6 +16,7 @@ package spec import ( "encoding/json" + "strconv" "strings" "github.com/go-openapi/jsonpointer" @@ -40,6 +41,24 @@ func (e Extensions) GetString(key string) (string, bool) { return "", false } +// GetInt gets a int value from the extensions +func (e Extensions) GetInt(key string) (int, bool) { + realKey := strings.ToLower(key) + + if v, ok := e.GetString(realKey); ok { + if r, err := strconv.Atoi(v); err == nil { + return r, true + } + } + + if v, ok := e[realKey]; ok { + if r, rOk := v.(float64); rOk { + return int(r), true + } + } + return -1, false +} + // GetBool gets a string value from the extensions func (e Extensions) GetBool(key string) (bool, bool) { if v, ok := e[strings.ToLower(key)]; ok { diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go index 2af13787..91d2435f 100644 --- a/vendor/github.com/go-openapi/spec/properties.go +++ b/vendor/github.com/go-openapi/spec/properties.go @@ -42,8 +42,8 @@ func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { func (items OrderSchemaItems) Len() int { return len(items) } func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } func (items OrderSchemaItems) Less(i, j int) (ret bool) { - ii, oki := items[i].Extensions.GetString("x-order") - ij, okj := items[j].Extensions.GetString("x-order") + ii, oki := items[i].Extensions.GetInt("x-order") + ij, okj := items[j].Extensions.GetInt("x-order") if oki { if okj { defer func() { @@ -56,7 +56,7 @@ func (items OrderSchemaItems) Less(i, j int) (ret bool) { ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() } }() - return reflect.ValueOf(ii).Int() < reflect.ValueOf(ij).Int() + return ii < ij } return true } else if okj { diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go index 4efb6f86..16c3076f 100644 --- a/vendor/github.com/go-openapi/spec/responses.go +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -19,6 +19,7 @@ import ( "fmt" "reflect" "strconv" + "strings" "github.com/go-openapi/swag" ) @@ -62,6 +63,7 @@ func (r *Responses) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { return err } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { return err } @@ -107,20 +109,31 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals responses from JSON func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]Response + var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { - return nil + return err } + if v, ok := res["default"]; ok { - r.Default = &v + var defaultRes Response + if err := json.Unmarshal(v, &defaultRes); err != nil { + return 
err + } + r.Default = &defaultRes delete(res, "default") } for k, v := range res { - if nk, err := strconv.Atoi(k); err == nil { - if r.StatusCodeResponses == nil { - r.StatusCodeResponses = map[int]Response{} + if !strings.HasPrefix(k, "x-") { + var statusCodeResp Response + if err := json.Unmarshal(v, &statusCodeResp); err != nil { + return err + } + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = statusCodeResp } - r.StatusCodeResponses[nk] = v } } return nil diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml index d36b2566..be4899cb 100644 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -14,31 +14,40 @@ linters-settings: min-occurrences: 4 linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoinits - - gochecknoglobals - - godox - - gocognit - - whitespace - - wsl - - funlen - - wrapcheck - - testpackage - - nlreturn - - gofumpt - - goerr113 - - gci - - gomnd - - godot - - exhaustivestruct - - paralleltest - - varnamelen - - ireturn - - exhaustruct - #- thelper + enable: + - revive + - goimports + - gosec + - unparam + - unconvert + - predeclared + - prealloc + - misspell + + # disable: + # - maligned + # - lll + # - gochecknoinits + # - gochecknoglobals + # - godox + # - gocognit + # - whitespace + # - wsl + # - funlen + # - wrapcheck + # - testpackage + # - nlreturn + # - gofumpt + # - goerr113 + # - gci + # - gomnd + # - godot + # - exhaustivestruct + # - paralleltest + # - varnamelen + # - ireturn + # - exhaustruct + # #- thelper issues: exclude-rules: diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go index 8740b150..a8a3604a 100644 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ b/vendor/github.com/go-openapi/strfmt/bson.go @@ -142,7 +142,7 @@ func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { // BSON value representation of themselves. The BSON bytes and type can be // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. 
-func (id *ObjectId) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { +func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error { var oid bsonprim.ObjectID copy(oid[:], data) *id = ObjectId(oid) diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go index f0b31096..3c93381c 100644 --- a/vendor/github.com/go-openapi/strfmt/date.go +++ b/vendor/github.com/go-openapi/strfmt/date.go @@ -57,7 +57,7 @@ func (d *Date) UnmarshalText(text []byte) error { if len(text) == 0 { return nil } - dd, err := time.Parse(RFC3339FullDate, string(text)) + dd, err := time.ParseInLocation(RFC3339FullDate, string(text), DefaultTimeLocation) if err != nil { return err } @@ -107,7 +107,7 @@ func (d *Date) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &strdate); err != nil { return err } - tt, err := time.Parse(RFC3339FullDate, strdate) + tt, err := time.ParseInLocation(RFC3339FullDate, strdate, DefaultTimeLocation) if err != nil { return err } @@ -126,7 +126,7 @@ func (d *Date) UnmarshalBSON(data []byte) error { } if data, ok := m["data"].(string); ok { - rd, err := time.Parse(RFC3339FullDate, data) + rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) if err != nil { return err } diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go index 172055d2..ad3b3c35 100644 --- a/vendor/github.com/go-openapi/strfmt/format.go +++ b/vendor/github.com/go-openapi/strfmt/format.go @@ -109,7 +109,7 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { // if to == tpe { switch v.Name { case "date": - d, err := time.Parse(RFC3339FullDate, data) + d, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) if err != nil { return nil, err } diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go index 023676e6..9bef4c3b 100644 --- a/vendor/github.com/go-openapi/strfmt/time.go +++ b/vendor/github.com/go-openapi/strfmt/time.go @@ -29,6 +29,12 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) +var ( + // UnixZero sets the zero unix timestamp we want to compare against. + // Unix 0 for an EST timezone is not equivalent to a UTC timezone. + UnixZero = time.Unix(0, 0).UTC() +) + func init() { dt := DateTime{} Default.Add("datetime", &dt, IsDateTime) @@ -86,6 +92,9 @@ var ( // NormalizeTimeForMarshal provides a normalization function on time befeore marshalling (e.g. time.UTC). // By default, the time value is not changed. NormalizeTimeForMarshal = func(t time.Time) time.Time { return t } + + // DefaultTimeLocation provides a location for a time when the time zone is not encoded in the string (ex: ISO8601 Local variants). 
+ DefaultTimeLocation = time.UTC ) // ParseDateTime parses a string that represents an ISO8601 time or a unix epoch @@ -95,7 +104,7 @@ func ParseDateTime(data string) (DateTime, error) { } var lastError error for _, layout := range DateTimeFormats { - dd, err := time.Parse(layout, data) + dd, err := time.ParseInLocation(layout, data, DefaultTimeLocation) if err != nil { lastError = err continue @@ -123,6 +132,22 @@ func (t DateTime) String() string { return NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat) } +// IsZero returns whether the date time is a zero value +func (t *DateTime) IsZero() bool { + if t == nil { + return true + } + return time.Time(*t).IsZero() +} + +// IsUnixZero returns whether the date time is equivalent to time.Unix(0, 0).UTC(). +func (t *DateTime) IsUnixZero() bool { + if t == nil { + return true + } + return time.Time(*t).Equal(UnixZero) +} + // MarshalText implements the text marshaller interface func (t DateTime) MarshalText() ([]byte, error) { return []byte(t.String()), nil diff --git a/vendor/github.com/go-openapi/strfmt/ulid.go b/vendor/github.com/go-openapi/strfmt/ulid.go index 4bd2ccd8..e71aff7c 100644 --- a/vendor/github.com/go-openapi/strfmt/ulid.go +++ b/vendor/github.com/go-openapi/strfmt/ulid.go @@ -15,9 +15,12 @@ import ( // ULID represents a ulid string format // ref: -// https://github.com/ulid/spec +// +// https://github.com/ulid/spec +// // impl: -// https://github.com/oklog/ulid +// +// https://github.com/oklog/ulid +// // swagger:strfmt ulid type ULID struct { @@ -89,7 +92,9 @@ func NewULIDZero() ULID { } // NewULID generates new unique ULID value and a error if any -func NewULID() (u ULID, err error) { +func NewULID() (ULID, error) { + var u ULID + obj := ulidEntropyPool.Get() entropy, ok := obj.(io.Reader) if !ok { diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index f78ab684..d971fbe3 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -341,12 +341,21 @@ type zeroable interface { // IsZero returns true when the value passed into the function is a zero value. // This allows for safer checking of interface values.
func IsZero(data interface{}) bool { + v := reflect.ValueOf(data) + // check for nil data + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return true + } + } + // check for things that have an IsZero method instead if vv, ok := data.(zeroable); ok { return vv.IsZero() } + // continue with slightly more complex reflection - v := reflect.ValueOf(data) switch v.Kind() { case reflect.String: return v.Len() == 0 @@ -358,14 +367,13 @@ func IsZero(data interface{}) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() case reflect.Struct, reflect.Array: return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) case reflect.Invalid: return true + default: + return false } - return false } // AddInitialisms add additional initialisms diff --git a/vendor/github.com/go-playground/locales/README.md b/vendor/github.com/go-playground/locales/README.md index 5b0694fd..7b6be2c6 100644 --- a/vendor/github.com/go-playground/locales/README.md +++ b/vendor/github.com/go-playground/locales/README.md @@ -1,10 +1,8 @@ ## locales -![Project status](https://img.shields.io/badge/version-0.14.0-green.svg) +![Project status](https://img.shields.io/badge/version-0.14.1-green.svg) [![Build Status](https://travis-ci.org/go-playground/locales.svg?branch=master)](https://travis-ci.org/go-playground/locales) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/locales)](https://goreportcard.com/report/github.com/go-playground/locales) [![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales) ![License](https://img.shields.io/dub/l/vibe-d.svg) -[![Gitter](https://badges.gitter.im/go-playground/locales.svg)](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator). 
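The swag.IsZero change above moves the reflect-based nil check ahead of the zeroable type assertion, so a typed nil pointer is reported as zero instead of panicking inside its own IsZero method. A small sketch of the guarded case; the zeroish type is hypothetical and stands in for any pointer-receiver zeroable implementation:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

// zeroish is a hypothetical type whose IsZero method dereferences its
// receiver, so calling it on a typed nil pointer would panic. This is
// exactly the case the reordered nil check guards against.
type zeroish struct{ n int }

func (z *zeroish) IsZero() bool { return z.n == 0 }

func main() {
	var p *zeroish // typed nil pointer

	// Before the patch, swag.IsZero type-asserted to zeroable first and
	// called p.IsZero(), panicking on the nil receiver; now the reflect
	// nil check runs first and reports true immediately.
	fmt.Println(swag.IsZero(p)) // true

	fmt.Println(swag.IsZero(&zeroish{n: 1})) // false
}
```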
diff --git a/vendor/github.com/go-playground/universal-translator/README.md b/vendor/github.com/go-playground/universal-translator/README.md index 46dec6d2..d9b66547 100644 --- a/vendor/github.com/go-playground/universal-translator/README.md +++ b/vendor/github.com/go-playground/universal-translator/README.md @@ -1,11 +1,9 @@ ## universal-translator -![Project status](https://img.shields.io/badge/version-0.18.0-green.svg) -[![Build Status](https://travis-ci.org/go-playground/universal-translator.svg?branch=master)](https://travis-ci.org/go-playground/universal-translator) +![Project status](https://img.shields.io/badge/version-0.18.1-green.svg) [![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator) [![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator) ![License](https://img.shields.io/dub/l/vibe-d.svg) -[![Gitter](https://badges.gitter.im/go-playground/universal-translator.svg)](https://gitter.im/go-playground/universal-translator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules diff --git a/vendor/github.com/go-playground/universal-translator/import_export.go b/vendor/github.com/go-playground/universal-translator/import_export.go index 1216f192..87a1b465 100644 --- a/vendor/github.com/go-playground/universal-translator/import_export.go +++ b/vendor/github.com/go-playground/universal-translator/import_export.go @@ -3,7 +3,6 @@ package ut import ( "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" @@ -41,7 +40,6 @@ const ( func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error { _, err := os.Stat(dirname) - fmt.Println(dirname, err, os.IsNotExist(err)) if err != nil { if !os.IsNotExist(err) { @@ -138,7 +136,7 @@ func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) return err } - err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644) + err = os.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644) if err != nil { return err } @@ -200,7 +198,7 @@ func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilenam // NOTE: generally used when assets have been embedded into the binary and are already in memory. 
func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error { - b, err := ioutil.ReadAll(reader) + b, err := io.ReadAll(reader) if err != nil { return err } diff --git a/vendor/github.com/go-playground/validator/v10/.gitignore b/vendor/github.com/go-playground/validator/v10/.gitignore index 6e43fac0..2410a91b 100644 --- a/vendor/github.com/go-playground/validator/v10/.gitignore +++ b/vendor/github.com/go-playground/validator/v10/.gitignore @@ -28,3 +28,4 @@ _testmain.go *.txt cover.html README.html +.idea diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md index 9d0a79e9..5f8878d2 100644 --- a/vendor/github.com/go-playground/validator/v10/README.md +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -1,7 +1,7 @@ Package validator ================= [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![Project status](https://img.shields.io/badge/version-10.11.1-green.svg) +![Project status](https://img.shields.io/badge/version-10.11.2-green.svg) [![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) diff --git a/vendor/github.com/go-redis/redis/v8/.gitignore b/vendor/github.com/go-redis/redis/v8/.gitignore new file mode 100644 index 00000000..b975a7b4 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/.gitignore @@ -0,0 +1,3 @@ +*.rdb +testdata/*/ +.idea/ diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml new file mode 100644 index 00000000..de514554 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/.golangci.yml @@ -0,0 +1,4 @@ +run: + concurrency: 8 + deadline: 5m + tests: false diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc.yml b/vendor/github.com/go-redis/redis/v8/.prettierrc.yml new file mode 100644 index 00000000..8b7f044a --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/.prettierrc.yml @@ -0,0 +1,4 @@ +semi: false +singleQuote: true +proseWrap: always +printWidth: 100 diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md new file mode 100644 index 00000000..195e5193 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md @@ -0,0 +1,177 @@ +## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17) + + +### Bug Fixes + +* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a)) +* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c)) +* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475)) +* **extra/redisotel:** set span.kind attribute to client 
([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2)) +* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32)) +* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4)) +* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc)) +* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f)) +* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2)) + + +### Features + +* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8)) +* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e)) +* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7)) +* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e)) +* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b)) +* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417)) +* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d)) + + + +## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04) + + +### Features + +* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634)) +* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24)) +* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4)) + + + +## v8.11 + +- Remove OpenTelemetry metrics. +- Supports more redis commands and options. + +## v8.10 + +- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a + single span with a Redis command (instead of 4 spans). There are multiple reasons behind this + decision: + + - Traces become smaller and less noisy. + - It may be costly to process those 3 extra spans for each query. + - go-redis no longer depends on OpenTelemetry. + + Eventually we hope to replace the information that we no longer collect with OpenTelemetry + Metrics. + +## v8.9 + +- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`, + `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings. + +## v8.8 + +- To make updating easier, extra modules now have the same version as go-redis does. 
That means that + you need to update your imports: + +``` +github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8 +github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8 +``` + +## v8.5 + +- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a + struct: + +```go +err := rdb.HGetAll(ctx, "hash").Scan(&data) + +err := rdb.MGet(ctx, "key1", "key2").Scan(&data) +``` + +- Please check [redismock](https://github.com/go-redis/redismock) by + [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client. + +## v8 + +- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not + using `context.Context` yet, the simplest option is to define global package variable + `var ctx = context.TODO()` and use it when `ctx` is required. + +- Full support for `context.Context` canceling. + +- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node. + +- Added `redisext.OpenTemetryHook` that adds + [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/). + +- Redis slow log support. + +- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move + existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme: + +```go +import "github.com/golang/groupcache/consistenthash" + +ring := redis.NewRing(&redis.RingOptions{ + NewConsistentHash: func() { + return consistenthash.New(100, crc32.ChecksumIEEE) + }, +}) +``` + +- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3. +- `Options.MaxRetries` default value is changed from 0 to 3. + +- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`. + +## v7.3 + +- New option `Options.Username` which causes client to use `AuthACL`. Be aware if your connection + URL contains username. + +## v7.2 + +- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users. + +## v7.1 + +- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer` + interface. + +## v7 + +- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a + transactional pipeline. +- WrapProcess is replaced with more convenient AddHook that has access to context.Context. +- WithContext now can not be used to create a shallow copy of the client. +- New methods ProcessContext, DoContext, and ExecContext. +- Client respects Context.Deadline when setting net.Conn deadline. +- Client listens on Context.Done while waiting for a connection from the pool and returns an error + when context context is cancelled. +- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow + detecting reconnections. +- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse + the time. +- `SetLimiter` is removed and added `Options.Limiter` instead. +- `HMSet` is deprecated as of Redis v4. + +## v6.15 + +- Cluster and Ring pipelines process commands for each node in its own goroutine. + +## 6.14 + +- Added Options.MinIdleConns. +- Added Options.MaxConnAge. +- PoolStats.FreeConns is renamed to PoolStats.IdleConns. +- Add Client.Do to simplify creating custom commands. +- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers. +- Lower memory usage. 
+ +## v6.13 + +- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set + `HashReplicas = 1000` for better keys distribution between shards. +- Cluster client was optimized to use much less memory when reloading cluster state. +- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout + occurres. In most cases it is recommended to use PubSub.Channel instead. +- Dialer.KeepAlive is set to 5 minutes by default. + +## v6.12 + +- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis + Servers that don't have cluster mode enabled. See + https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup diff --git a/vendor/github.com/go-redis/redis/v8/LICENSE b/vendor/github.com/go-redis/redis/v8/LICENSE new file mode 100644 index 00000000..298bed9b --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/go-redis/redis Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile new file mode 100644 index 00000000..a4cfe057 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/Makefile @@ -0,0 +1,35 @@ +PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort) + +test: testdeps + go test ./... + go test ./... -short -race + go test ./... -run=NONE -bench=. -benchmem + env GOOS=linux GOARCH=386 go test ./... + go vet + +testdeps: testdata/redis/src/redis-server + +bench: testdeps + go test ./... -test.run=NONE -test.bench=. 
-test.benchmem + +.PHONY: all test testdeps bench + +testdata/redis: + mkdir -p $@ + wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@ + +testdata/redis/src/redis-server: testdata/redis + cd $< && make all + +fmt: + gofmt -w -s ./ + goimports -w -local github.com/go-redis/redis ./ + +go_mod_tidy: + go get -u && go mod tidy + set -e; for dir in $(PACKAGE_DIRS); do \ + echo "go mod tidy in $${dir}"; \ + (cd "$${dir}" && \ + go get -u && \ + go mod tidy); \ + done diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md new file mode 100644 index 00000000..f3b6a018 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/README.md @@ -0,0 +1,175 @@ +# Redis client for Go + +![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) +[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/) + +go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). +Uptrace is an open source and blazingly fast **distributed tracing** backend powered by +OpenTelemetry and ClickHouse. Give it a star as well! + +## Resources + +- [Discussions](https://github.com/go-redis/redis/discussions) +- [Documentation](https://redis.uptrace.dev) +- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) +- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples) +- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app) + +Other projects you may like: + +- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite. +- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go. + +## Ecosystem + +- [Redis Mock](https://github.com/go-redis/redismock) +- [Distributed Locks](https://github.com/bsm/redislock) +- [Redis Cache](https://github.com/go-redis/cache) +- [Rate limiting](https://github.com/go-redis/redis_rate) + +## Features + +- Redis 3 commands except QUIT, MONITOR, and SYNC. +- Automatic connection pooling with + [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support. +- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub). +- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline). +- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and + [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline). +- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script). +- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options). +- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient). +- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient). +- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup) + without using cluster mode and Redis Sentinel. +- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing). +- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation). 
+ +## Installation + +go-redis supports 2 last Go versions and requires a Go version with +[modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go +module: + +```shell +go mod init github.com/my/repo +``` + +And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake): + +```shell +go get github.com/go-redis/redis/v8 +``` + +## Quickstart + +```go +import ( + "context" + "github.com/go-redis/redis/v8" + "fmt" +) + +var ctx = context.Background() + +func ExampleClient() { + rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Password: "", // no password set + DB: 0, // use default DB + }) + + err := rdb.Set(ctx, "key", "value", 0).Err() + if err != nil { + panic(err) + } + + val, err := rdb.Get(ctx, "key").Result() + if err != nil { + panic(err) + } + fmt.Println("key", val) + + val2, err := rdb.Get(ctx, "key2").Result() + if err == redis.Nil { + fmt.Println("key2 does not exist") + } else if err != nil { + panic(err) + } else { + fmt.Println("key2", val2) + } + // Output: key value + // key2 does not exist +} +``` + +## Look and feel + +Some corner cases: + +```go +// SET key value EX 10 NX +set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result() + +// SET key value keepttl NX +set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result() + +// SORT list LIMIT 0 2 ASC +vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() + +// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 +vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{ + Min: "-inf", + Max: "+inf", + Offset: 0, + Count: 2, +}).Result() + +// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM +vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{ + Keys: []string{"zset1", "zset2"}, + Weights: []int64{2, 3} +}).Result() + +// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" +vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() + +// custom command +res, err := rdb.Do(ctx, "set", "key", "value").Result() +``` + +## Run the test + +go-redis will start a redis-server and run the test cases. + +The paths of redis-server bin file and redis config file are defined in `main_test.go`: + +``` +var ( + redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server")) + redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf")) +) +``` + +For local testing, you can change the variables to refer to your local files, or create a soft link +to the corresponding folder for redis-server and copy the config file to `testdata/redis/`: + +``` +ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src +cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/ +``` + +Lastly, run: + +``` +go test +``` + +## Contributors + +Thanks to all the people who already contributed! + + + + diff --git a/vendor/github.com/go-redis/redis/v8/RELEASING.md b/vendor/github.com/go-redis/redis/v8/RELEASING.md new file mode 100644 index 00000000..1115db4e --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/RELEASING.md @@ -0,0 +1,15 @@ +# Releasing + +1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub: + +```shell +TAG=v1.0.0 ./scripts/release.sh +``` + +2. Open a pull request and wait for the build to finish. + +3. 
Merge the pull request and run `tag.sh` to create tags for packages: + +```shell +TAG=v1.0.0 ./scripts/tag.sh +``` diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go new file mode 100644 index 00000000..a54f2f37 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/cluster.go @@ -0,0 +1,1750 @@ +package redis + +import ( + "context" + "crypto/tls" + "fmt" + "math" + "net" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/go-redis/redis/v8/internal" + "github.com/go-redis/redis/v8/internal/hashtag" + "github.com/go-redis/redis/v8/internal/pool" + "github.com/go-redis/redis/v8/internal/proto" + "github.com/go-redis/redis/v8/internal/rand" +) + +var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") + +// ClusterOptions are used to configure a cluster client and should be +// passed to NewClusterClient. +type ClusterOptions struct { + // A seed list of host:port addresses of cluster nodes. + Addrs []string + + // NewClient creates a cluster node client with provided name and options. + NewClient func(opt *Options) *Client + + // The maximum number of retries before giving up. Command is retried + // on network errors and MOVED/ASK redirects. + // Default is 3 retries. + MaxRedirects int + + // Enables read-only commands on slave nodes. + ReadOnly bool + // Allows routing read-only commands to the closest master or slave node. + // It automatically enables ReadOnly. + RouteByLatency bool + // Allows routing read-only commands to the random master or slave node. + // It automatically enables ReadOnly. + RouteRandomly bool + + // Optional function that returns cluster slots information. + // It is useful to manually create cluster of standalone Redis servers + // and load-balance read/write operations between master and slaves. + // It can use service like ZooKeeper to maintain configuration information + // and Cluster.ReloadState to manually trigger state reloading. + ClusterSlots func(context.Context) ([]ClusterSlot, error) + + // Following options are copied from Options struct. + + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) + + OnConnect func(ctx context.Context, cn *Conn) error + + Username string + Password string + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). + PoolFIFO bool + + // PoolSize applies per cluster node and not for the whole cluster. 
+ PoolSize int + MinIdleConns int + MaxConnAge time.Duration + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + + TLSConfig *tls.Config +} + +func (opt *ClusterOptions) init() { + if opt.MaxRedirects == -1 { + opt.MaxRedirects = 0 + } else if opt.MaxRedirects == 0 { + opt.MaxRedirects = 3 + } + + if opt.RouteByLatency || opt.RouteRandomly { + opt.ReadOnly = true + } + + if opt.PoolSize == 0 { + opt.PoolSize = 5 * runtime.GOMAXPROCS(0) + } + + switch opt.ReadTimeout { + case -1: + opt.ReadTimeout = 0 + case 0: + opt.ReadTimeout = 3 * time.Second + } + switch opt.WriteTimeout { + case -1: + opt.WriteTimeout = 0 + case 0: + opt.WriteTimeout = opt.ReadTimeout + } + + if opt.MaxRetries == 0 { + opt.MaxRetries = -1 + } + switch opt.MinRetryBackoff { + case -1: + opt.MinRetryBackoff = 0 + case 0: + opt.MinRetryBackoff = 8 * time.Millisecond + } + switch opt.MaxRetryBackoff { + case -1: + opt.MaxRetryBackoff = 0 + case 0: + opt.MaxRetryBackoff = 512 * time.Millisecond + } + + if opt.NewClient == nil { + opt.NewClient = NewClient + } +} + +func (opt *ClusterOptions) clientOptions() *Options { + const disableIdleCheck = -1 + + return &Options{ + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + + Username: opt.Username, + Password: opt.Password, + + MaxRetries: opt.MaxRetries, + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: disableIdleCheck, + + TLSConfig: opt.TLSConfig, + // If ClusterSlots is populated, then we probably have an artificial + // cluster whose nodes are not in clustering mode (otherwise there isn't + // much use for ClusterSlots config). This means we cannot execute the + // READONLY command against that node -- setting readOnly to false in such + // situations in the options below will prevent that from happening. 
+ readOnly: opt.ReadOnly && opt.ClusterSlots == nil, + } +} + +//------------------------------------------------------------------------------ + +type clusterNode struct { + Client *Client + + latency uint32 // atomic + generation uint32 // atomic + failing uint32 // atomic +} + +func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { + opt := clOpt.clientOptions() + opt.Addr = addr + node := clusterNode{ + Client: clOpt.NewClient(opt), + } + + node.latency = math.MaxUint32 + if clOpt.RouteByLatency { + go node.updateLatency() + } + + return &node +} + +func (n *clusterNode) String() string { + return n.Client.String() +} + +func (n *clusterNode) Close() error { + return n.Client.Close() +} + +func (n *clusterNode) updateLatency() { + const numProbe = 10 + var dur uint64 + + for i := 0; i < numProbe; i++ { + time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond) + + start := time.Now() + n.Client.Ping(context.TODO()) + dur += uint64(time.Since(start) / time.Microsecond) + } + + latency := float64(dur) / float64(numProbe) + atomic.StoreUint32(&n.latency, uint32(latency+0.5)) +} + +func (n *clusterNode) Latency() time.Duration { + latency := atomic.LoadUint32(&n.latency) + return time.Duration(latency) * time.Microsecond +} + +func (n *clusterNode) MarkAsFailing() { + atomic.StoreUint32(&n.failing, uint32(time.Now().Unix())) +} + +func (n *clusterNode) Failing() bool { + const timeout = 15 // 15 seconds + + failing := atomic.LoadUint32(&n.failing) + if failing == 0 { + return false + } + if time.Now().Unix()-int64(failing) < timeout { + return true + } + atomic.StoreUint32(&n.failing, 0) + return false +} + +func (n *clusterNode) Generation() uint32 { + return atomic.LoadUint32(&n.generation) +} + +func (n *clusterNode) SetGeneration(gen uint32) { + for { + v := atomic.LoadUint32(&n.generation) + if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) { + break + } + } +} + +//------------------------------------------------------------------------------ + +type clusterNodes struct { + opt *ClusterOptions + + mu sync.RWMutex + addrs []string + nodes map[string]*clusterNode + activeAddrs []string + closed bool + + _generation uint32 // atomic +} + +func newClusterNodes(opt *ClusterOptions) *clusterNodes { + return &clusterNodes{ + opt: opt, + + addrs: opt.Addrs, + nodes: make(map[string]*clusterNode), + } +} + +func (c *clusterNodes) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil + } + c.closed = true + + var firstErr error + for _, node := range c.nodes { + if err := node.Client.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + + c.nodes = nil + c.activeAddrs = nil + + return firstErr +} + +func (c *clusterNodes) Addrs() ([]string, error) { + var addrs []string + + c.mu.RLock() + closed := c.closed //nolint:ifshort + if !closed { + if len(c.activeAddrs) > 0 { + addrs = c.activeAddrs + } else { + addrs = c.addrs + } + } + c.mu.RUnlock() + + if closed { + return nil, pool.ErrClosed + } + if len(addrs) == 0 { + return nil, errClusterNoNodes + } + return addrs, nil +} + +func (c *clusterNodes) NextGeneration() uint32 { + return atomic.AddUint32(&c._generation, 1) +} + +// GC removes unused nodes. 
+func (c *clusterNodes) GC(generation uint32) { + //nolint:prealloc + var collected []*clusterNode + + c.mu.Lock() + + c.activeAddrs = c.activeAddrs[:0] + for addr, node := range c.nodes { + if node.Generation() >= generation { + c.activeAddrs = append(c.activeAddrs, addr) + if c.opt.RouteByLatency { + go node.updateLatency() + } + continue + } + + delete(c.nodes, addr) + collected = append(collected, node) + } + + c.mu.Unlock() + + for _, node := range collected { + _ = node.Client.Close() + } +} + +func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { + node, err := c.get(addr) + if err != nil { + return nil, err + } + if node != nil { + return node, nil + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil, pool.ErrClosed + } + + node, ok := c.nodes[addr] + if ok { + return node, nil + } + + node = newClusterNode(c.opt, addr) + + c.addrs = appendIfNotExists(c.addrs, addr) + c.nodes[addr] = node + + return node, nil +} + +func (c *clusterNodes) get(addr string) (*clusterNode, error) { + var node *clusterNode + var err error + c.mu.RLock() + if c.closed { + err = pool.ErrClosed + } else { + node = c.nodes[addr] + } + c.mu.RUnlock() + return node, err +} + +func (c *clusterNodes) All() ([]*clusterNode, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.closed { + return nil, pool.ErrClosed + } + + cp := make([]*clusterNode, 0, len(c.nodes)) + for _, node := range c.nodes { + cp = append(cp, node) + } + return cp, nil +} + +func (c *clusterNodes) Random() (*clusterNode, error) { + addrs, err := c.Addrs() + if err != nil { + return nil, err + } + + n := rand.Intn(len(addrs)) + return c.GetOrCreate(addrs[n]) +} + +//------------------------------------------------------------------------------ + +type clusterSlot struct { + start, end int + nodes []*clusterNode +} + +type clusterSlotSlice []*clusterSlot + +func (p clusterSlotSlice) Len() int { + return len(p) +} + +func (p clusterSlotSlice) Less(i, j int) bool { + return p[i].start < p[j].start +} + +func (p clusterSlotSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +type clusterState struct { + nodes *clusterNodes + Masters []*clusterNode + Slaves []*clusterNode + + slots []*clusterSlot + + generation uint32 + createdAt time.Time +} + +func newClusterState( + nodes *clusterNodes, slots []ClusterSlot, origin string, +) (*clusterState, error) { + c := clusterState{ + nodes: nodes, + + slots: make([]*clusterSlot, 0, len(slots)), + + generation: nodes.NextGeneration(), + createdAt: time.Now(), + } + + originHost, _, _ := net.SplitHostPort(origin) + isLoopbackOrigin := isLoopback(originHost) + + for _, slot := range slots { + var nodes []*clusterNode + for i, slotNode := range slot.Nodes { + addr := slotNode.Addr + if !isLoopbackOrigin { + addr = replaceLoopbackHost(addr, originHost) + } + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return nil, err + } + + node.SetGeneration(c.generation) + nodes = append(nodes, node) + + if i == 0 { + c.Masters = appendUniqueNode(c.Masters, node) + } else { + c.Slaves = appendUniqueNode(c.Slaves, node) + } + } + + c.slots = append(c.slots, &clusterSlot{ + start: slot.Start, + end: slot.End, + nodes: nodes, + }) + } + + sort.Sort(clusterSlotSlice(c.slots)) + + time.AfterFunc(time.Minute, func() { + nodes.GC(c.generation) + }) + + return &c, nil +} + +func replaceLoopbackHost(nodeAddr, originHost string) string { + nodeHost, nodePort, err := net.SplitHostPort(nodeAddr) + if err != nil { + return nodeAddr + } + + nodeIP := net.ParseIP(nodeHost) + if 
nodeIP == nil { + return nodeAddr + } + + if !nodeIP.IsLoopback() { + return nodeAddr + } + + // Use origin host which is not loopback and node port. + return net.JoinHostPort(originHost, nodePort) +} + +func isLoopback(host string) bool { + ip := net.ParseIP(host) + if ip == nil { + return true + } + return ip.IsLoopback() +} + +func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + switch len(nodes) { + case 0: + return c.nodes.Random() + case 1: + return nodes[0], nil + case 2: + if slave := nodes[1]; !slave.Failing() { + return slave, nil + } + return nodes[0], nil + default: + var slave *clusterNode + for i := 0; i < 10; i++ { + n := rand.Intn(len(nodes)-1) + 1 + slave = nodes[n] + if !slave.Failing() { + return slave, nil + } + } + + // All slaves are loading - use master. + return nodes[0], nil + } +} + +func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + + var node *clusterNode + for _, n := range nodes { + if n.Failing() { + continue + } + if node == nil || n.Latency() < node.Latency() { + node = n + } + } + if node != nil { + return node, nil + } + + // If all nodes are failing - return random node + return c.nodes.Random() +} + +func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + if len(nodes) == 1 { + return nodes[0], nil + } + randomNodes := rand.Perm(len(nodes)) + for _, idx := range randomNodes { + if node := nodes[idx]; !node.Failing() { + return node, nil + } + } + return nodes[randomNodes[0]], nil +} + +func (c *clusterState) slotNodes(slot int) []*clusterNode { + i := sort.Search(len(c.slots), func(i int) bool { + return c.slots[i].end >= slot + }) + if i >= len(c.slots) { + return nil + } + x := c.slots[i] + if slot >= x.start && slot <= x.end { + return x.nodes + } + return nil +} + +//------------------------------------------------------------------------------ + +type clusterStateHolder struct { + load func(ctx context.Context) (*clusterState, error) + + state atomic.Value + reloading uint32 // atomic +} + +func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder { + return &clusterStateHolder{ + load: fn, + } +} + +func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) { + state, err := c.load(ctx) + if err != nil { + return nil, err + } + c.state.Store(state) + return state, nil +} + +func (c *clusterStateHolder) LazyReload() { + if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { + return + } + go func() { + defer atomic.StoreUint32(&c.reloading, 0) + + _, err := c.Reload(context.Background()) + if err != nil { + return + } + time.Sleep(200 * time.Millisecond) + }() +} + +func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) { + v := c.state.Load() + if v == nil { + return c.Reload(ctx) + } + + state := v.(*clusterState) + if time.Since(state.createdAt) > 10*time.Second { + c.LazyReload() + } + return state, nil +} + +func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) { + state, err := c.Reload(ctx) + if err == nil { + return state, nil + } + return c.Get(ctx) +} + 
+//------------------------------------------------------------------------------ + +type clusterClient struct { + opt *ClusterOptions + nodes *clusterNodes + state *clusterStateHolder //nolint:structcheck + cmdsInfoCache *cmdsInfoCache //nolint:structcheck +} + +// ClusterClient is a Redis Cluster client representing a pool of zero +// or more underlying connections. It's safe for concurrent use by +// multiple goroutines. +type ClusterClient struct { + *clusterClient + cmdable + hooks + ctx context.Context +} + +// NewClusterClient returns a Redis Cluster client as described in +// http://redis.io/topics/cluster-spec. +func NewClusterClient(opt *ClusterOptions) *ClusterClient { + opt.init() + + c := &ClusterClient{ + clusterClient: &clusterClient{ + opt: opt, + nodes: newClusterNodes(opt), + }, + ctx: context.Background(), + } + c.state = newClusterStateHolder(c.loadState) + c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) + c.cmdable = c.Process + + if opt.IdleCheckFrequency > 0 { + go c.reaper(opt.IdleCheckFrequency) + } + + return c +} + +func (c *ClusterClient) Context() context.Context { + return c.ctx +} + +func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient { + if ctx == nil { + panic("nil context") + } + clone := *c + clone.cmdable = clone.Process + clone.hooks.lock() + clone.ctx = ctx + return &clone +} + +// Options returns read-only Options that were used to create the client. +func (c *ClusterClient) Options() *ClusterOptions { + return c.opt +} + +// ReloadState reloads cluster state. If available it calls ClusterSlots func +// to get cluster slots information. +func (c *ClusterClient) ReloadState(ctx context.Context) { + c.state.LazyReload() +} + +// Close closes the cluster client, releasing any open resources. +// +// It is rare to Close a ClusterClient, as the ClusterClient is meant +// to be long-lived and shared between many goroutines. +func (c *ClusterClient) Close() error { + return c.nodes.Close() +} + +// Do creates a Cmd from the args and processes the cmd. +func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { + cmd := NewCmd(ctx, args...) + _ = c.Process(ctx, cmd) + return cmd +} + +func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.process) +} + +func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { + cmdInfo := c.cmdInfo(cmd.Name()) + slot := c.cmdSlot(cmd) + + var node *clusterNode + var ask bool + var lastErr error + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + if node == nil { + var err error + node, err = c.cmdNode(ctx, cmdInfo, slot) + if err != nil { + return err + } + } + + if ask { + pipe := node.Client.Pipeline() + _ = pipe.Process(ctx, NewCmd(ctx, "asking")) + _ = pipe.Process(ctx, cmd) + _, lastErr = pipe.Exec(ctx) + _ = pipe.Close() + ask = false + } else { + lastErr = node.Client.Process(ctx, cmd) + } + + // If there is no error - we are done. + if lastErr == nil { + return nil + } + if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed { + if isReadOnly { + c.state.LazyReload() + } + node = nil + continue + } + + // If slave is loading - pick another node. 
+ if c.opt.ReadOnly && isLoadingError(lastErr) { + node.MarkAsFailing() + node = nil + continue + } + + var moved bool + var addr string + moved, ask, addr = isMovedError(lastErr) + if moved || ask { + c.state.LazyReload() + + var err error + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + continue + } + + if shouldRetry(lastErr, cmd.readTimeout() == nil) { + // First retry the same node. + if attempt == 0 { + continue + } + + // Second try another node. + node.MarkAsFailing() + node = nil + continue + } + + return lastErr + } + return lastErr +} + +// ForEachMaster concurrently calls the fn on each master node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachMaster( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + for _, master := range state.Masters { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(master) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachSlave concurrently calls the fn on each slave node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachSlave( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + for _, slave := range state.Slaves { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(slave) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachShard concurrently calls the fn on each known node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachShard( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + worker := func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + } + + for _, node := range state.Masters { + wg.Add(1) + go worker(node) + } + for _, node := range state.Slaves { + wg.Add(1) + go worker(node) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// PoolStats returns accumulated connection pool stats. 
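+// Stats are summed over the pools of every known master and slave node.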
+func (c *ClusterClient) PoolStats() *PoolStats { + var acc PoolStats + + state, _ := c.state.Get(context.TODO()) + if state == nil { + return &acc + } + + for _, node := range state.Masters { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + for _, node := range state.Slaves { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + return &acc +} + +func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) { + if c.opt.ClusterSlots != nil { + slots, err := c.opt.ClusterSlots(ctx) + if err != nil { + return nil, err + } + return newClusterState(c.nodes, slots, "") + } + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + + for _, idx := range rand.Perm(len(addrs)) { + addr := addrs[idx] + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + slots, err := node.Client.ClusterSlots(ctx).Result() + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + return newClusterState(c.nodes, slots, node.Client.opt.Addr) + } + + /* + * No node is connectable. It's possible that all nodes' IP has changed. + * Clear activeAddrs to let client be able to re-connect using the initial + * setting of the addresses (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), + * which might have chance to resolve domain name and get updated IP address. + */ + c.nodes.mu.Lock() + c.nodes.activeAddrs = nil + c.nodes.mu.Unlock() + + return nil, firstErr +} + +// reaper closes idle connections to the cluster. 
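+// It wakes every idleCheckFrequency and reaps stale connections from
+// every node's pool, stopping once the node list is no longer available.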
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { + ticker := time.NewTicker(idleCheckFrequency) + defer ticker.Stop() + + for range ticker.C { + nodes, err := c.nodes.All() + if err != nil { + break + } + + for _, node := range nodes { + _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() + if err != nil { + internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err) + } + } + } +} + +func (c *ClusterClient) Pipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processPipeline, + } + pipe.init() + return &pipe +} + +func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(ctx, fn) +} + +func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, c._processPipeline) +} + +func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error { + cmdsMap := newCmdsMap() + err := c.mapCmdsByNode(ctx, cmdsMap, cmds) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + setCmdsErr(cmds, err) + return err + } + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap.m { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + err := c._processPipelineNode(ctx, node, cmds, failedCmds) + if err == nil { + return + } + if attempt < c.opt.MaxRedirects { + if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { + setCmdsErr(cmds, err) + } + } else { + setCmdsErr(cmds, err) + } + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds + } + + return cmdsFirstErr(cmds) +} + +func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error { + state, err := c.state.Get(ctx) + if err != nil { + return err + } + + if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) { + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + node, err := c.slotReadOnlyNode(state, slot) + if err != nil { + return err + } + cmdsMap.Add(node, cmd) + } + return nil + } + + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + node, err := state.slotMasterNode(slot) + if err != nil { + return err + } + cmdsMap.Add(node, cmd) + } + return nil +} + +func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { + for _, cmd := range cmds { + cmdInfo := c.cmdInfo(cmd.Name()) + if cmdInfo == nil || !cmdInfo.ReadOnly { + return false + } + } + return true +} + +func (c *ClusterClient) _processPipelineNode( + ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) error { + return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }) + if err != nil { + return err + } + + return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { + return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds) + }) + }) + }) +} + +func (c *ClusterClient) pipelineReadCmds( + ctx context.Context, + node *clusterNode, + rd *proto.Reader, + cmds []Cmder, + failedCmds *cmdsMap, +) error { + for _, cmd := range cmds { + err := cmd.readReply(rd) + cmd.SetErr(err) + + if err == nil { + continue + } + + 
if c.checkMovedErr(ctx, cmd, err, failedCmds) { + continue + } + + if c.opt.ReadOnly && isLoadingError(err) { + node.MarkAsFailing() + return err + } + if isRedisError(err) { + continue + } + return err + } + return nil +} + +func (c *ClusterClient) checkMovedErr( + ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap, +) bool { + moved, ask, addr := isMovedError(err) + if !moved && !ask { + return false + } + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + if moved { + c.state.LazyReload() + failedCmds.Add(node, cmd) + return true + } + + if ask { + failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) + return true + } + + panic("not reached") +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *ClusterClient) TxPipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processTxPipeline, + } + pipe.init() + return &pipe +} + +func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(ctx, fn) +} + +func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline) +} + +func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error { + // Trim multi .. exec. + cmds = cmds[1 : len(cmds)-1] + + state, err := c.state.Get(ctx) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + cmdsMap := c.mapCmdsBySlot(cmds) + for slot, cmds := range cmdsMap { + node, err := state.slotMasterNode(slot) + if err != nil { + setCmdsErr(cmds, err) + continue + } + + cmdsMap := map[*clusterNode][]Cmder{node: cmds} + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + setCmdsErr(cmds, err) + return err + } + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + err := c._processTxPipelineNode(ctx, node, cmds, failedCmds) + if err == nil { + return + } + + if attempt < c.opt.MaxRedirects { + if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { + setCmdsErr(cmds, err) + } + } else { + setCmdsErr(cmds, err) + } + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds.m + } + } + + return cmdsFirstErr(cmds) +} + +func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { + cmdsMap := make(map[int][]Cmder) + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + cmdsMap[slot] = append(cmdsMap[slot], cmd) + } + return cmdsMap +} + +func (c *ClusterClient) _processTxPipelineNode( + ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) error { + return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }) + if err != nil { + return err + } + + return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { + statusCmd := cmds[0].(*StatusCmd) + // Trim multi and exec. 
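+ // Only the user commands get individual replies read below; the
+ // MULTI/EXEC framing and QUEUED acknowledgements are drained through
+ // statusCmd in txPipelineReadQueued.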
+ cmds = cmds[1 : len(cmds)-1] + + err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds) + if err != nil { + moved, ask, addr := isMovedError(err) + if moved || ask { + return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds) + } + return err + } + + return pipelineReadCmds(rd, cmds) + }) + }) + }) +} + +func (c *ClusterClient) txPipelineReadQueued( + ctx context.Context, + rd *proto.Reader, + statusCmd *StatusCmd, + cmds []Cmder, + failedCmds *cmdsMap, +) error { + // Parse queued replies. + if err := statusCmd.readReply(rd); err != nil { + return err + } + + for _, cmd := range cmds { + err := statusCmd.readReply(rd) + if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) { + continue + } + return err + } + + // Parse number of replies. + line, err := rd.ReadLine() + if err != nil { + if err == Nil { + err = TxFailedErr + } + return err + } + + switch line[0] { + case proto.ErrorReply: + return proto.ParseErrorReply(line) + case proto.ArrayReply: + // ok + default: + return fmt.Errorf("redis: expected '*', but got line %q", line) + } + + return nil +} + +func (c *ClusterClient) cmdsMoved( + ctx context.Context, cmds []Cmder, + moved, ask bool, + addr string, + failedCmds *cmdsMap, +) error { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + + if moved { + c.state.LazyReload() + for _, cmd := range cmds { + failedCmds.Add(node, cmd) + } + return nil + } + + if ask { + for _, cmd := range cmds { + failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) + } + return nil + } + + return nil +} + +func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { + if len(keys) == 0 { + return fmt.Errorf("redis: Watch requires at least one key") + } + + slot := hashtag.Slot(keys[0]) + for _, key := range keys[1:] { + if hashtag.Slot(key) != slot { + err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") + return err + } + } + + node, err := c.slotMasterNode(ctx, slot) + if err != nil { + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + err = node.Client.Watch(ctx, fn, keys...) 
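+ // On success we are done; otherwise the error below decides between
+ // re-targeting the node (MOVED/ASK), re-resolving the slot master
+ // (read-only/closed), retrying, or giving up.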
+ if err == nil { + break + } + + moved, ask, addr := isMovedError(err) + if moved || ask { + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + continue + } + + if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed { + if isReadOnly { + c.state.LazyReload() + } + node, err = c.slotMasterNode(ctx, slot) + if err != nil { + return err + } + continue + } + + if shouldRetry(err, true) { + continue + } + + return err + } + + return err +} + +func (c *ClusterClient) pubSub() *PubSub { + var node *clusterNode + pubsub := &PubSub{ + opt: c.opt.clientOptions(), + + newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { + if node != nil { + panic("node != nil") + } + + var err error + if len(channels) > 0 { + slot := hashtag.Slot(channels[0]) + node, err = c.slotMasterNode(ctx, slot) + } else { + node, err = c.nodes.Random() + } + if err != nil { + return nil, err + } + + cn, err := node.Client.newConn(context.TODO()) + if err != nil { + node = nil + + return nil, err + } + + return cn, nil + }, + closeConn: func(cn *pool.Conn) error { + err := node.Client.connPool.CloseConn(cn) + node = nil + return err + }, + } + pubsub.init() + + return pubsub +} + +// Subscribe subscribes the client to the specified channels. +// Channels can be omitted to create empty subscription. +func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.Subscribe(ctx, channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.PSubscribe(ctx, channels...) + } + return pubsub +} + +func (c *ClusterClient) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { + // Try 3 random nodes. 
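+ // and use the first successful COMMAND reply; only the first error is
+ // remembered for reporting.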
+ const nodeLimit = 3 + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + + perm := rand.Perm(len(addrs)) + if len(perm) > nodeLimit { + perm = perm[:nodeLimit] + } + + for _, idx := range perm { + addr := addrs[idx] + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + info, err := node.Client.Command(ctx).Result() + if err == nil { + return info, nil + } + if firstErr == nil { + firstErr = err + } + } + + if firstErr == nil { + panic("not reached") + } + return nil, firstErr +} + +func (c *ClusterClient) cmdInfo(name string) *CommandInfo { + cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx) + if err != nil { + return nil + } + + info := cmdsInfo[name] + if info == nil { + internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name) + } + return info +} + +func (c *ClusterClient) cmdSlot(cmd Cmder) int { + args := cmd.Args() + if args[0] == "cluster" && args[1] == "getkeysinslot" { + return args[2].(int) + } + + cmdInfo := c.cmdInfo(cmd.Name()) + return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) +} + +func cmdSlot(cmd Cmder, pos int) int { + if pos == 0 { + return hashtag.RandomSlot() + } + firstKey := cmd.stringArg(pos) + return hashtag.Slot(firstKey) +} + +func (c *ClusterClient) cmdNode( + ctx context.Context, + cmdInfo *CommandInfo, + slot int, +) (*clusterNode, error) { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + + if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly { + return c.slotReadOnlyNode(state, slot) + } + return state.slotMasterNode(slot) +} + +func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) { + if c.opt.RouteByLatency { + return state.slotClosestNode(slot) + } + if c.opt.RouteRandomly { + return state.slotRandomNode(slot) + } + return state.slotSlaveNode(slot) +} + +func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + return state.slotMasterNode(slot) +} + +// SlaveForKey gets a client for a replica node to run any command on it. +// This is especially useful if we want to run a particular lua script which has +// only read only commands on the replica. +// This is because other redis commands generally have a flag that points that +// they are read only and automatically run on the replica nodes +// if ClusterOptions.ReadOnly flag is set to true. +func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + slot := hashtag.Slot(key) + node, err := c.slotReadOnlyNode(state, slot) + if err != nil { + return nil, err + } + return node.Client, err +} + +// MasterForKey return a client to the master node for a particular key. 
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) { + slot := hashtag.Slot(key) + node, err := c.slotMasterNode(ctx, slot) + if err != nil { + return nil, err + } + return node.Client, err +} + +func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { + for _, n := range nodes { + if n == node { + return nodes + } + } + return append(nodes, node) +} + +func appendIfNotExists(ss []string, es ...string) []string { +loop: + for _, e := range es { + for _, s := range ss { + if s == e { + continue loop + } + } + ss = append(ss, e) + } + return ss +} + +//------------------------------------------------------------------------------ + +type cmdsMap struct { + mu sync.Mutex + m map[*clusterNode][]Cmder +} + +func newCmdsMap() *cmdsMap { + return &cmdsMap{ + m: make(map[*clusterNode][]Cmder), + } +} + +func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) { + m.mu.Lock() + m.m[node] = append(m.m[node], cmds...) + m.mu.Unlock() +} diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go new file mode 100644 index 00000000..085bce83 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/cluster_commands.go @@ -0,0 +1,109 @@ +package redis + +import ( + "context" + "sync" + "sync/atomic" +) + +func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd { + cmd := NewIntCmd(ctx, "dbsize") + _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + var size int64 + err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error { + n, err := master.DBSize(ctx).Result() + if err != nil { + return err + } + atomic.AddInt64(&size, n) + return nil + }) + if err != nil { + cmd.SetErr(err) + } else { + cmd.val = size + } + return nil + }) + return cmd +} + +func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd { + cmd := NewStringCmd(ctx, "script", "load", script) + _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + mu := &sync.Mutex{} + err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { + val, err := shard.ScriptLoad(ctx, script).Result() + if err != nil { + return err + } + + mu.Lock() + if cmd.Val() == "" { + cmd.val = val + } + mu.Unlock() + + return nil + }) + if err != nil { + cmd.SetErr(err) + } + return nil + }) + return cmd +} + +func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "script", "flush") + _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { + return shard.ScriptFlush(ctx).Err() + }) + if err != nil { + cmd.SetErr(err) + } + return nil + }) + return cmd +} + +func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd { + args := make([]interface{}, 2+len(hashes)) + args[0] = "script" + args[1] = "exists" + for i, hash := range hashes { + args[2+i] = hash + } + cmd := NewBoolSliceCmd(ctx, args...) 
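+ // Assume every script exists until proven otherwise: the per-shard
+ // replies below are AND-folded, so a single miss flips an entry to false.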
+ + result := make([]bool, len(hashes)) + for i := range result { + result[i] = true + } + + _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + mu := &sync.Mutex{} + err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { + val, err := shard.ScriptExists(ctx, hashes...).Result() + if err != nil { + return err + } + + mu.Lock() + for i, v := range val { + result[i] = result[i] && v + } + mu.Unlock() + + return nil + }) + if err != nil { + cmd.SetErr(err) + } else { + cmd.val = result + } + return nil + }) + return cmd +} diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go new file mode 100644 index 00000000..4bb12a85 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/command.go @@ -0,0 +1,3478 @@ +package redis + +import ( + "context" + "fmt" + "net" + "strconv" + "time" + + "github.com/go-redis/redis/v8/internal" + "github.com/go-redis/redis/v8/internal/hscan" + "github.com/go-redis/redis/v8/internal/proto" + "github.com/go-redis/redis/v8/internal/util" +) + +type Cmder interface { + Name() string + FullName() string + Args() []interface{} + String() string + stringArg(int) string + firstKeyPos() int8 + SetFirstKeyPos(int8) + + readTimeout() *time.Duration + readReply(rd *proto.Reader) error + + SetErr(error) + Err() error +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + if cmd.Err() == nil { + cmd.SetErr(e) + } + } +} + +func cmdsFirstErr(cmds []Cmder) error { + for _, cmd := range cmds { + if err := cmd.Err(); err != nil { + return err + } + } + return nil +} + +func writeCmds(wr *proto.Writer, cmds []Cmder) error { + for _, cmd := range cmds { + if err := writeCmd(wr, cmd); err != nil { + return err + } + } + return nil +} + +func writeCmd(wr *proto.Writer, cmd Cmder) error { + return wr.WriteArgs(cmd.Args()) +} + +func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { + if pos := cmd.firstKeyPos(); pos != 0 { + return int(pos) + } + + switch cmd.Name() { + case "eval", "evalsha": + if cmd.stringArg(2) != "0" { + return 3 + } + + return 0 + case "publish": + return 1 + case "memory": + // https://github.com/redis/redis/issues/7493 + if cmd.stringArg(1) == "usage" { + return 2 + } + } + + if info != nil { + return int(info.FirstKeyPos) + } + return 0 +} + +func cmdString(cmd Cmder, val interface{}) string { + b := make([]byte, 0, 64) + + for i, arg := range cmd.Args() { + if i > 0 { + b = append(b, ' ') + } + b = internal.AppendArg(b, arg) + } + + if err := cmd.Err(); err != nil { + b = append(b, ": "...) + b = append(b, err.Error()...) + } else if val != nil { + b = append(b, ": "...) + b = internal.AppendArg(b, val) + } + + return internal.String(b) +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + ctx context.Context + args []interface{} + err error + keyPos int8 + + _readTimeout *time.Duration +} + +var _ Cmder = (*Cmd)(nil) + +func (cmd *baseCmd) Name() string { + if len(cmd.args) == 0 { + return "" + } + // Cmd name must be lower cased. 
+ return internal.ToLower(cmd.stringArg(0)) +} + +func (cmd *baseCmd) FullName() string { + switch name := cmd.Name(); name { + case "cluster", "command": + if len(cmd.args) == 1 { + return name + } + if s2, ok := cmd.args[1].(string); ok { + return name + " " + s2 + } + return name + default: + return name + } +} + +func (cmd *baseCmd) Args() []interface{} { + return cmd.args +} + +func (cmd *baseCmd) stringArg(pos int) string { + if pos < 0 || pos >= len(cmd.args) { + return "" + } + arg := cmd.args[pos] + switch v := arg.(type) { + case string: + return v + default: + // TODO: consider using appendArg + return fmt.Sprint(v) + } +} + +func (cmd *baseCmd) firstKeyPos() int8 { + return cmd.keyPos +} + +func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) { + cmd.keyPos = keyPos +} + +func (cmd *baseCmd) SetErr(e error) { + cmd.err = e +} + +func (cmd *baseCmd) Err() error { + return cmd.err +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + baseCmd + + val interface{} +} + +func NewCmd(ctx context.Context, args ...interface{}) *Cmd { + return &Cmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *Cmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *Cmd) SetVal(val interface{}) { + cmd.val = val +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) Text() (string, error) { + if cmd.err != nil { + return "", cmd.err + } + return toString(cmd.val) +} + +func toString(val interface{}) (string, error) { + switch val := val.(type) { + case string: + return val, nil + default: + err := fmt.Errorf("redis: unexpected type=%T for String", val) + return "", err + } +} + +func (cmd *Cmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return int(val), nil + case string: + return strconv.Atoi(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int", val) + return 0, err + } +} + +func (cmd *Cmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toInt64(cmd.val) +} + +func toInt64(val interface{}) (int64, error) { + switch val := val.(type) { + case int64: + return val, nil + case string: + return strconv.ParseInt(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int64", val) + return 0, err + } +} + +func (cmd *Cmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toUint64(cmd.val) +} + +func toUint64(val interface{}) (uint64, error) { + switch val := val.(type) { + case int64: + return uint64(val), nil + case string: + return strconv.ParseUint(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Uint64", val) + return 0, err + } +} + +func (cmd *Cmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toFloat32(cmd.val) +} + +func toFloat32(val interface{}) (float32, error) { + switch val := val.(type) { + case int64: + return float32(val), nil + case string: + f, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(f), nil + default: + err := fmt.Errorf("redis: unexpected type=%T for Float32", val) + return 0, err + } +} + +func (cmd *Cmd) Float64() (float64, error) { + if 
cmd.err != nil { + return 0, cmd.err + } + return toFloat64(cmd.val) +} + +func toFloat64(val interface{}) (float64, error) { + switch val := val.(type) { + case int64: + return float64(val), nil + case string: + return strconv.ParseFloat(val, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Float64", val) + return 0, err + } +} + +func (cmd *Cmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + return toBool(cmd.val) +} + +func toBool(val interface{}) (bool, error) { + switch val := val.(type) { + case int64: + return val != 0, nil + case string: + return strconv.ParseBool(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Bool", val) + return false, err + } +} + +func (cmd *Cmd) Slice() ([]interface{}, error) { + if cmd.err != nil { + return nil, cmd.err + } + switch val := cmd.val.(type) { + case []interface{}: + return val, nil + default: + return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val) + } +} + +func (cmd *Cmd) StringSlice() ([]string, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + ss := make([]string, len(slice)) + for i, iface := range slice { + val, err := toString(iface) + if err != nil { + return nil, err + } + ss[i] = val + } + return ss, nil +} + +func (cmd *Cmd) Int64Slice() ([]int64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + nums := make([]int64, len(slice)) + for i, iface := range slice { + val, err := toInt64(iface) + if err != nil { + return nil, err + } + nums[i] = val + } + return nums, nil +} + +func (cmd *Cmd) Uint64Slice() ([]uint64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + nums := make([]uint64, len(slice)) + for i, iface := range slice { + val, err := toUint64(iface) + if err != nil { + return nil, err + } + nums[i] = val + } + return nums, nil +} + +func (cmd *Cmd) Float32Slice() ([]float32, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + floats := make([]float32, len(slice)) + for i, iface := range slice { + val, err := toFloat32(iface) + if err != nil { + return nil, err + } + floats[i] = val + } + return floats, nil +} + +func (cmd *Cmd) Float64Slice() ([]float64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + floats := make([]float64, len(slice)) + for i, iface := range slice { + val, err := toFloat64(iface) + if err != nil { + return nil, err + } + floats[i] = val + } + return floats, nil +} + +func (cmd *Cmd) BoolSlice() ([]bool, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + bools := make([]bool, len(slice)) + for i, iface := range slice { + val, err := toBool(iface) + if err != nil { + return nil, err + } + bools[i] = val + } + return bools, nil +} + +func (cmd *Cmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadReply(sliceParser) + return err +} + +// sliceParser implements proto.MultiBulkParse. 
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) { + vals := make([]interface{}, n) + for i := 0; i < len(vals); i++ { + v, err := rd.ReadReply(sliceParser) + if err != nil { + if err == Nil { + vals[i] = nil + continue + } + if err, ok := err.(proto.RedisError); ok { + vals[i] = err + continue + } + return nil, err + } + vals[i] = v + } + return vals, nil +} + +//------------------------------------------------------------------------------ + +type SliceCmd struct { + baseCmd + + val []interface{} +} + +var _ Cmder = (*SliceCmd)(nil) + +func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd { + return &SliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *SliceCmd) SetVal(val []interface{}) { + cmd.val = val +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() ([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +// Scan scans the results from the map into a destination struct. The map keys +// are matched in the Redis struct fields by the `redis:"field"` tag. +func (cmd *SliceCmd) Scan(dst interface{}) error { + if cmd.err != nil { + return cmd.err + } + + // Pass the list of keys and values. + // Skip the first two args for: HMGET key + var args []interface{} + if cmd.args[0] == "hmget" { + args = cmd.args[2:] + } else { + // Otherwise, it's: MGET field field ... + args = cmd.args[1:] + } + + return hscan.Scan(dst, args, cmd.val) +} + +func (cmd *SliceCmd) readReply(rd *proto.Reader) error { + v, err := rd.ReadArrayReply(sliceParser) + if err != nil { + return err + } + cmd.val = v.([]interface{}) + return nil +} + +//------------------------------------------------------------------------------ + +type StatusCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StatusCmd)(nil) + +func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd { + return &StatusCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StatusCmd) SetVal(val string) { + cmd.val = val +} + +func (cmd *StatusCmd) Val() string { + return cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadString() + return err +} + +//------------------------------------------------------------------------------ + +type IntCmd struct { + baseCmd + + val int64 +} + +var _ Cmder = (*IntCmd)(nil) + +func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd { + return &IntCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *IntCmd) SetVal(val int64) { + cmd.val = val +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) Uint64() (uint64, error) { + return uint64(cmd.val), cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadIntReply() + return err +} + +//------------------------------------------------------------------------------ + +type IntSliceCmd struct { + baseCmd + + val []int64 +} + +var _ Cmder = (*IntSliceCmd)(nil) + +func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd { + return &IntSliceCmd{ + baseCmd: baseCmd{ + ctx: 
ctx, + args: args, + }, + } +} + +func (cmd *IntSliceCmd) SetVal(val []int64) { + cmd.val = val +} + +func (cmd *IntSliceCmd) Val() []int64 { + return cmd.val +} + +func (cmd *IntSliceCmd) Result() ([]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]int64, n) + for i := 0; i < len(cmd.val); i++ { + num, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.val[i] = num + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + baseCmd + + val time.Duration + precision time.Duration +} + +var _ Cmder = (*DurationCmd)(nil) + +func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd { + return &DurationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + precision: precision, + } +} + +func (cmd *DurationCmd) SetVal(val time.Duration) { + cmd.val = val +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DurationCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadIntReply() + if err != nil { + return err + } + switch n { + // -2 if the key does not exist + // -1 if the key exists but has no associated expire + case -2, -1: + cmd.val = time.Duration(n) + default: + cmd.val = time.Duration(n) * cmd.precision + } + return nil +} + +//------------------------------------------------------------------------------ + +type TimeCmd struct { + baseCmd + + val time.Time +} + +var _ Cmder = (*TimeCmd)(nil) + +func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd { + return &TimeCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *TimeCmd) SetVal(val time.Time) { + cmd.val = val +} + +func (cmd *TimeCmd) Val() time.Time { + return cmd.val +} + +func (cmd *TimeCmd) Result() (time.Time, error) { + return cmd.val, cmd.err +} + +func (cmd *TimeCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *TimeCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d elements, expected 2", n) + } + + sec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + microsec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + cmd.val = time.Unix(sec, microsec*1000) + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type BoolCmd struct { + baseCmd + + val bool +} + +var _ Cmder = (*BoolCmd)(nil) + +func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd { + return &BoolCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *BoolCmd) SetVal(val bool) { + cmd.val = val +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolCmd) readReply(rd *proto.Reader) error { + v, err := rd.ReadReply(nil) + // `SET key value NX` returns nil when key already exists. 
But + // `SETNX key value` returns bool (0/1). So convert nil to bool. + if err == Nil { + cmd.val = false + return nil + } + if err != nil { + return err + } + switch v := v.(type) { + case int64: + cmd.val = v == 1 + return nil + case string: + cmd.val = v == "OK" + return nil + default: + return fmt.Errorf("got %T, wanted int64 or string", v) + } +} + +//------------------------------------------------------------------------------ + +type StringCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StringCmd)(nil) + +func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd { + return &StringCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringCmd) SetVal(val string) { + cmd.val = val +} + +func (cmd *StringCmd) Val() string { + return cmd.val +} + +func (cmd *StringCmd) Result() (string, error) { + return cmd.Val(), cmd.err +} + +func (cmd *StringCmd) Bytes() ([]byte, error) { + return util.StringToBytes(cmd.val), cmd.err +} + +func (cmd *StringCmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + return strconv.ParseBool(cmd.val) +} + +func (cmd *StringCmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.Atoi(cmd.Val()) +} + +func (cmd *StringCmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseInt(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseUint(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + f, err := strconv.ParseFloat(cmd.Val(), 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +func (cmd *StringCmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseFloat(cmd.Val(), 64) +} + +func (cmd *StringCmd) Time() (time.Time, error) { + if cmd.err != nil { + return time.Time{}, cmd.err + } + return time.Parse(time.RFC3339Nano, cmd.Val()) +} + +func (cmd *StringCmd) Scan(val interface{}) error { + if cmd.err != nil { + return cmd.err + } + return proto.Scan([]byte(cmd.val), val) +} + +func (cmd *StringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadString() + return err +} + +//------------------------------------------------------------------------------ + +type FloatCmd struct { + baseCmd + + val float64 +} + +var _ Cmder = (*FloatCmd)(nil) + +func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd { + return &FloatCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FloatCmd) SetVal(val float64) { + cmd.val = val +} + +func (cmd *FloatCmd) Val() float64 { + return cmd.val +} + +func (cmd *FloatCmd) Result() (float64, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *FloatCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadFloatReply() + return err +} + +//------------------------------------------------------------------------------ + +type FloatSliceCmd struct { + baseCmd + + val []float64 +} + +var _ Cmder = (*FloatSliceCmd)(nil) + +func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd { + return &FloatSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FloatSliceCmd) SetVal(val []float64) { + cmd.val = 
val +} + +func (cmd *FloatSliceCmd) Val() []float64 { + return cmd.val +} + +func (cmd *FloatSliceCmd) Result() ([]float64, error) { + return cmd.val, cmd.err +} + +func (cmd *FloatSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]float64, n) + for i := 0; i < len(cmd.val); i++ { + switch num, err := rd.ReadFloatReply(); { + case err == Nil: + cmd.val[i] = 0 + case err != nil: + return nil, err + default: + cmd.val[i] = num + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type StringSliceCmd struct { + baseCmd + + val []string +} + +var _ Cmder = (*StringSliceCmd)(nil) + +func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd { + return &StringSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringSliceCmd) SetVal(val []string) { + cmd.val = val +} + +func (cmd *StringSliceCmd) Val() []string { + return cmd.val +} + +func (cmd *StringSliceCmd) Result() ([]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *StringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { + return proto.ScanSlice(cmd.Val(), container) +} + +func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]string, n) + for i := 0; i < len(cmd.val); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.val[i] = "" + case err != nil: + return nil, err + default: + cmd.val[i] = s + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type BoolSliceCmd struct { + baseCmd + + val []bool +} + +var _ Cmder = (*BoolSliceCmd)(nil) + +func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd { + return &BoolSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *BoolSliceCmd) SetVal(val []bool) { + cmd.val = val +} + +func (cmd *BoolSliceCmd) Val() []bool { + return cmd.val +} + +func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]bool, n) + for i := 0; i < len(cmd.val); i++ { + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.val[i] = n == 1 + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type StringStringMapCmd struct { + baseCmd + + val map[string]string +} + +var _ Cmder = (*StringStringMapCmd)(nil) + +func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd { + return &StringStringMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringStringMapCmd) SetVal(val map[string]string) { + cmd.val = val +} + +func (cmd *StringStringMapCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *StringStringMapCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStringMapCmd) String() string { + 
return cmdString(cmd, cmd.val) +} + +// Scan scans the results from the map into a destination struct. The map keys +// are matched in the Redis struct fields by the `redis:"field"` tag. +func (cmd *StringStringMapCmd) Scan(dest interface{}) error { + if cmd.err != nil { + return cmd.err + } + + strct, err := hscan.Struct(dest) + if err != nil { + return err + } + + for k, v := range cmd.val { + if err := strct.Scan(k, v); err != nil { + return err + } + } + + return nil +} + +func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]string, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val[key] = value + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type StringIntMapCmd struct { + baseCmd + + val map[string]int64 +} + +var _ Cmder = (*StringIntMapCmd)(nil) + +func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd { + return &StringIntMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringIntMapCmd) SetVal(val map[string]int64) { + cmd.val = val +} + +func (cmd *StringIntMapCmd) Val() map[string]int64 { + return cmd.val +} + +func (cmd *StringIntMapCmd) Result() (map[string]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *StringIntMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]int64, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + cmd.val[key] = n + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type StringStructMapCmd struct { + baseCmd + + val map[string]struct{} +} + +var _ Cmder = (*StringStructMapCmd)(nil) + +func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd { + return &StringStructMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) { + cmd.val = val +} + +func (cmd *StringStructMapCmd) Val() map[string]struct{} { + return cmd.val +} + +func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStructMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]struct{}, n) + for i := int64(0); i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + cmd.val[key] = struct{}{} + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XMessage struct { + ID string + Values map[string]interface{} +} + +type XMessageSliceCmd struct { + baseCmd + + val []XMessage +} + +var _ Cmder = (*XMessageSliceCmd)(nil) + +func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd { + return 
&XMessageSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XMessageSliceCmd) SetVal(val []XMessage) { + cmd.val = val +} + +func (cmd *XMessageSliceCmd) Val() []XMessage { + return cmd.val +} + +func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) { + return cmd.val, cmd.err +} + +func (cmd *XMessageSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error { + var err error + cmd.val, err = readXMessageSlice(rd) + return err +} + +func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + msgs := make([]XMessage, n) + for i := 0; i < n; i++ { + var err error + msgs[i], err = readXMessage(rd) + if err != nil { + return nil, err + } + } + return msgs, nil +} + +func readXMessage(rd *proto.Reader) (XMessage, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return XMessage{}, err + } + if n != 2 { + return XMessage{}, fmt.Errorf("got %d, wanted 2", n) + } + + id, err := rd.ReadString() + if err != nil { + return XMessage{}, err + } + + var values map[string]interface{} + + v, err := rd.ReadArrayReply(stringInterfaceMapParser) + if err != nil { + if err != proto.Nil { + return XMessage{}, err + } + } else { + values = v.(map[string]interface{}) + } + + return XMessage{ + ID: id, + Values: values, + }, nil +} + +// stringInterfaceMapParser implements proto.MultiBulkParse. +func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]interface{}, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = value + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type XStream struct { + Stream string + Messages []XMessage +} + +type XStreamSliceCmd struct { + baseCmd + + val []XStream +} + +var _ Cmder = (*XStreamSliceCmd)(nil) + +func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd { + return &XStreamSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XStreamSliceCmd) SetVal(val []XStream) { + cmd.val = val +} + +func (cmd *XStreamSliceCmd) Val() []XStream { + return cmd.val +} + +func (cmd *XStreamSliceCmd) Result() ([]XStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XStreamSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]XStream, n) + for i := 0; i < len(cmd.val); i++ { + i := i + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + stream, err := rd.ReadString() + if err != nil { + return nil, err + } + + msgs, err := readXMessageSlice(rd) + if err != nil { + return nil, err + } + + cmd.val[i] = XStream{ + Stream: stream, + Messages: msgs, + } + return nil, nil + }) + if err != nil { + return nil, err + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XPending struct { + Count int64 + Lower string + Higher string + Consumers map[string]int64 +} + +type XPendingCmd struct { + baseCmd + val *XPending +} + +var _ Cmder = 
(*XPendingCmd)(nil) + +func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd { + return &XPendingCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XPendingCmd) SetVal(val *XPending) { + cmd.val = val +} + +func (cmd *XPendingCmd) Val() *XPending { + return cmd.val +} + +func (cmd *XPendingCmd) Result() (*XPending, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 4 { + return nil, fmt.Errorf("got %d, wanted 4", n) + } + + count, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + lower, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + higher, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + cmd.val = &XPending{ + Count: count, + Lower: lower, + Higher: higher, + } + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + for i := int64(0); i < n; i++ { + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + consumerName, err := rd.ReadString() + if err != nil { + return nil, err + } + + consumerPending, err := rd.ReadInt() + if err != nil { + return nil, err + } + + if cmd.val.Consumers == nil { + cmd.val.Consumers = make(map[string]int64) + } + cmd.val.Consumers[consumerName] = consumerPending + + return nil, nil + }) + if err != nil { + return nil, err + } + } + return nil, nil + }) + if err != nil && err != Nil { + return nil, err + } + + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XPendingExt struct { + ID string + Consumer string + Idle time.Duration + RetryCount int64 +} + +type XPendingExtCmd struct { + baseCmd + val []XPendingExt +} + +var _ Cmder = (*XPendingExtCmd)(nil) + +func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd { + return &XPendingExtCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) { + cmd.val = val +} + +func (cmd *XPendingExtCmd) Val() []XPendingExt { + return cmd.val +} + +func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingExtCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]XPendingExt, 0, n) + for i := int64(0); i < n; i++ { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 4 { + return nil, fmt.Errorf("got %d, wanted 4", n) + } + + id, err := rd.ReadString() + if err != nil { + return nil, err + } + + consumer, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + idle, err := rd.ReadIntReply() + if err != nil && err != Nil { + return nil, err + } + + retryCount, err := rd.ReadIntReply() + if err != nil && err != Nil { + return nil, err + } + + cmd.val = append(cmd.val, XPendingExt{ + ID: id, + Consumer: consumer, + Idle: time.Duration(idle) * time.Millisecond, + RetryCount: retryCount, + }) + return nil, nil + }) + if err != nil { + return nil, err + } + } + return nil, nil + }) + return err +} + 
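+// Reply shape note: XPENDING with a start/end range yields one
+// [ID, consumer, idle-ms, retry-count] array per entry; readReply above
+// maps each into an XPendingExt, converting idle-ms to a time.Duration.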
+//------------------------------------------------------------------------------ + +type XAutoClaimCmd struct { + baseCmd + + start string + val []XMessage +} + +var _ Cmder = (*XAutoClaimCmd)(nil) + +func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd { + return &XAutoClaimCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) { + cmd.val = val + cmd.start = start +} + +func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) { + return cmd.val, cmd.start +} + +func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) { + return cmd.val, cmd.start, cmd.err +} + +func (cmd *XAutoClaimCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + var err error + + cmd.start, err = rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val, err = readXMessageSlice(rd) + if err != nil { + return nil, err + } + + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XAutoClaimJustIDCmd struct { + baseCmd + + start string + val []string +} + +var _ Cmder = (*XAutoClaimJustIDCmd)(nil) + +func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd { + return &XAutoClaimJustIDCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) { + cmd.val = val + cmd.start = start +} + +func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) { + return cmd.val, cmd.start +} + +func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) { + return cmd.val, cmd.start, cmd.err +} + +func (cmd *XAutoClaimJustIDCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + var err error + + cmd.start, err = rd.ReadString() + if err != nil { + return nil, err + } + + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + cmd.val = make([]string, nn) + for i := 0; i < nn; i++ { + cmd.val[i], err = rd.ReadString() + if err != nil { + return nil, err + } + } + + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XInfoConsumersCmd struct { + baseCmd + val []XInfoConsumer +} + +type XInfoConsumer struct { + Name string + Pending int64 + Idle int64 +} + +var _ Cmder = (*XInfoConsumersCmd)(nil) + +func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd { + return &XInfoConsumersCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "consumers", stream, group}, + }, + } +} + +func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) { + cmd.val = val +} + +func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer { + return cmd.val +} + +func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoConsumersCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error { + n, err := 
rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]XInfoConsumer, n) + + for i := 0; i < n; i++ { + cmd.val[i], err = readXConsumerInfo(rd) + if err != nil { + return err + } + } + + return nil +} + +func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) { + var consumer XInfoConsumer + + n, err := rd.ReadArrayLen() + if err != nil { + return consumer, err + } + if n != 6 { + return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n) + } + + for i := 0; i < 3; i++ { + key, err := rd.ReadString() + if err != nil { + return consumer, err + } + + val, err := rd.ReadString() + if err != nil { + return consumer, err + } + + switch key { + case "name": + consumer.Name = val + case "pending": + consumer.Pending, err = strconv.ParseInt(val, 0, 64) + if err != nil { + return consumer, err + } + case "idle": + consumer.Idle, err = strconv.ParseInt(val, 0, 64) + if err != nil { + return consumer, err + } + default: + return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key) + } + } + + return consumer, nil +} + +//------------------------------------------------------------------------------ + +type XInfoGroupsCmd struct { + baseCmd + val []XInfoGroup +} + +type XInfoGroup struct { + Name string + Consumers int64 + Pending int64 + LastDeliveredID string +} + +var _ Cmder = (*XInfoGroupsCmd)(nil) + +func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd { + return &XInfoGroupsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "groups", stream}, + }, + } +} + +func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) { + cmd.val = val +} + +func (cmd *XInfoGroupsCmd) Val() []XInfoGroup { + return cmd.val +} + +func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoGroupsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]XInfoGroup, n) + + for i := 0; i < n; i++ { + cmd.val[i], err = readXGroupInfo(rd) + if err != nil { + return err + } + } + + return nil +} + +func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) { + var group XInfoGroup + + n, err := rd.ReadArrayLen() + if err != nil { + return group, err + } + if n != 8 { + return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n) + } + + for i := 0; i < 4; i++ { + key, err := rd.ReadString() + if err != nil { + return group, err + } + + val, err := rd.ReadString() + if err != nil { + return group, err + } + + switch key { + case "name": + group.Name = val + case "consumers": + group.Consumers, err = strconv.ParseInt(val, 0, 64) + if err != nil { + return group, err + } + case "pending": + group.Pending, err = strconv.ParseInt(val, 0, 64) + if err != nil { + return group, err + } + case "last-delivered-id": + group.LastDeliveredID = val + default: + return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key) + } + } + + return group, nil +} + +//------------------------------------------------------------------------------ + +type XInfoStreamCmd struct { + baseCmd + val *XInfoStream +} + +type XInfoStream struct { + Length int64 + RadixTreeKeys int64 + RadixTreeNodes int64 + Groups int64 + LastGeneratedID string + FirstEntry XMessage + LastEntry XMessage +} + +var _ Cmder = (*XInfoStreamCmd)(nil) + +func NewXInfoStreamCmd(ctx context.Context, stream string) 
*XInfoStreamCmd { + return &XInfoStreamCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "stream", stream}, + }, + } +} + +func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) { + cmd.val = val +} + +func (cmd *XInfoStreamCmd) Val() *XInfoStream { + return cmd.val +} + +func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoStreamCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error { + v, err := rd.ReadReply(xStreamInfoParser) + if err != nil { + return err + } + cmd.val = v.(*XInfoStream) + return nil +} + +func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) { + if n != 14 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+ + "wanted 14", n) + } + var info XInfoStream + for i := 0; i < 7; i++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + switch key { + case "length": + info.Length, err = rd.ReadIntReply() + case "radix-tree-keys": + info.RadixTreeKeys, err = rd.ReadIntReply() + case "radix-tree-nodes": + info.RadixTreeNodes, err = rd.ReadIntReply() + case "groups": + info.Groups, err = rd.ReadIntReply() + case "last-generated-id": + info.LastGeneratedID, err = rd.ReadString() + case "first-entry": + info.FirstEntry, err = readXMessage(rd) + if err == Nil { + err = nil + } + case "last-entry": + info.LastEntry, err = readXMessage(rd) + if err == Nil { + err = nil + } + default: + return nil, fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM reply", key) + } + if err != nil { + return nil, err + } + } + return &info, nil +} + +//------------------------------------------------------------------------------ + +type XInfoStreamFullCmd struct { + baseCmd + val *XInfoStreamFull +} + +type XInfoStreamFull struct { + Length int64 + RadixTreeKeys int64 + RadixTreeNodes int64 + LastGeneratedID string + Entries []XMessage + Groups []XInfoStreamGroup +} + +type XInfoStreamGroup struct { + Name string + LastDeliveredID string + PelCount int64 + Pending []XInfoStreamGroupPending + Consumers []XInfoStreamConsumer +} + +type XInfoStreamGroupPending struct { + ID string + Consumer string + DeliveryTime time.Time + DeliveryCount int64 +} + +type XInfoStreamConsumer struct { + Name string + SeenTime time.Time + PelCount int64 + Pending []XInfoStreamConsumerPending +} + +type XInfoStreamConsumerPending struct { + ID string + DeliveryTime time.Time + DeliveryCount int64 +} + +var _ Cmder = (*XInfoStreamFullCmd)(nil) + +func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd { + return &XInfoStreamFullCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) { + cmd.val = val +} + +func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull { + return cmd.val +} + +func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoStreamFullCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + if n != 12 { + return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ + "wanted 12", n) + } + + cmd.val = &XInfoStreamFull{} + + for i := 0; i < 6; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "length": + cmd.val.Length, err = rd.ReadIntReply() + case 
"radix-tree-keys": + cmd.val.RadixTreeKeys, err = rd.ReadIntReply() + case "radix-tree-nodes": + cmd.val.RadixTreeNodes, err = rd.ReadIntReply() + case "last-generated-id": + cmd.val.LastGeneratedID, err = rd.ReadString() + case "entries": + cmd.val.Entries, err = readXMessageSlice(rd) + case "groups": + cmd.val.Groups, err = readStreamGroups(rd) + default: + return fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM reply", key) + } + if err != nil { + return err + } + } + return nil +} + +func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + groups := make([]XInfoStreamGroup, 0, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if nn != 10 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ + "wanted 10", nn) + } + + group := XInfoStreamGroup{} + + for f := 0; f < 5; f++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + switch key { + case "name": + group.Name, err = rd.ReadString() + case "last-delivered-id": + group.LastDeliveredID, err = rd.ReadString() + case "pel-count": + group.PelCount, err = rd.ReadIntReply() + case "pending": + group.Pending, err = readXInfoStreamGroupPending(rd) + case "consumers": + group.Consumers, err = readXInfoStreamConsumers(rd) + default: + return nil, fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM reply", key) + } + + if err != nil { + return nil, err + } + } + + groups = append(groups, group) + } + + return groups, nil +} + +func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + pending := make([]XInfoStreamGroupPending, 0, n) + + for i := 0; i < n; i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if nn != 4 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ + "wanted 4", nn) + } + + p := XInfoStreamGroupPending{} + + p.ID, err = rd.ReadString() + if err != nil { + return nil, err + } + + p.Consumer, err = rd.ReadString() + if err != nil { + return nil, err + } + + delivery, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) + + p.DeliveryCount, err = rd.ReadIntReply() + if err != nil { + return nil, err + } + + pending = append(pending, p) + } + + return pending, nil +} + +func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + consumers := make([]XInfoStreamConsumer, 0, n) + + for i := 0; i < n; i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if nn != 8 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ + "wanted 8", nn) + } + + c := XInfoStreamConsumer{} + + for f := 0; f < 4; f++ { + cKey, err := rd.ReadString() + if err != nil { + return nil, err + } + + switch cKey { + case "name": + c.Name, err = rd.ReadString() + case "seen-time": + seen, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond)) + case "pel-count": + c.PelCount, err = rd.ReadIntReply() + case "pending": + pendingNumber, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber) + + for pn := 
0; pn < pendingNumber; pn++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if nn != 3 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+ + "wanted 3", nn) + } + + p := XInfoStreamConsumerPending{} + + p.ID, err = rd.ReadString() + if err != nil { + return nil, err + } + + delivery, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) + + p.DeliveryCount, err = rd.ReadIntReply() + if err != nil { + return nil, err + } + + c.Pending = append(c.Pending, p) + } + default: + return nil, fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM reply", cKey) + } + if err != nil { + return nil, err + } + } + consumers = append(consumers, c) + } + + return consumers, nil +} + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + baseCmd + + val []Z +} + +var _ Cmder = (*ZSliceCmd)(nil) + +func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd { + return &ZSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZSliceCmd) SetVal(val []Z) { + cmd.val = val +} + +func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]Z, n/2) + for i := 0; i < len(cmd.val); i++ { + member, err := rd.ReadString() + if err != nil { + return nil, err + } + + score, err := rd.ReadFloatReply() + if err != nil { + return nil, err + } + + cmd.val[i] = Z{ + Member: member, + Score: score, + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type ZWithKeyCmd struct { + baseCmd + + val *ZWithKey +} + +var _ Cmder = (*ZWithKeyCmd)(nil) + +func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd { + return &ZWithKeyCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) { + cmd.val = val +} + +func (cmd *ZWithKeyCmd) Val() *ZWithKey { + return cmd.val +} + +func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ZWithKeyCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 3 { + return nil, fmt.Errorf("got %d elements, expected 3", n) + } + + cmd.val = &ZWithKey{} + var err error + + cmd.val.Key, err = rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val.Member, err = rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val.Score, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type ScanCmd struct { + baseCmd + + page []string + cursor uint64 + + process cmdable +} + +var _ Cmder = (*ScanCmd)(nil) + +func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd { + return &ScanCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + process: process, + } +} + +func (cmd *ScanCmd) SetVal(page []string, cursor uint64) { + 
cmd.page = page + cmd.cursor = cursor +} + +func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { + return cmd.page, cmd.cursor +} + +func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { + return cmd.page, cmd.cursor, cmd.err +} + +func (cmd *ScanCmd) String() string { + return cmdString(cmd, cmd.page) +} + +func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) { + cmd.page, cmd.cursor, err = rd.ReadScanReply() + return err +} + +// Iterator creates a new ScanIterator. +func (cmd *ScanCmd) Iterator() *ScanIterator { + return &ScanIterator{ + cmd: cmd, + } +} + +//------------------------------------------------------------------------------ + +type ClusterNode struct { + ID string + Addr string +} + +type ClusterSlot struct { + Start int + End int + Nodes []ClusterNode +} + +type ClusterSlotsCmd struct { + baseCmd + + val []ClusterSlot +} + +var _ Cmder = (*ClusterSlotsCmd)(nil) + +func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd { + return &ClusterSlotsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) { + cmd.val = val +} + +func (cmd *ClusterSlotsCmd) Val() []ClusterSlot { + return cmd.val +} + +func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterSlotsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]ClusterSlot, n) + for i := 0; i < len(cmd.val); i++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n < 2 { + err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n) + return nil, err + } + + start, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + end, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + nodes := make([]ClusterNode, n-2) + for j := 0; j < len(nodes); j++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n != 2 && n != 3 { + err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n) + return nil, err + } + + ip, err := rd.ReadString() + if err != nil { + return nil, err + } + + port, err := rd.ReadString() + if err != nil { + return nil, err + } + + nodes[j].Addr = net.JoinHostPort(ip, port) + + if n == 3 { + id, err := rd.ReadString() + if err != nil { + return nil, err + } + nodes[j].ID = id + } + } + + cmd.val[i] = ClusterSlot{ + Start: int(start), + End: int(end), + Nodes: nodes, + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +// GeoLocation is used with GeoAdd to add geospatial location. +type GeoLocation struct { + Name string + Longitude, Latitude, Dist float64 + GeoHash int64 +} + +// GeoRadiusQuery is used with GeoRadius to query geospatial index. +type GeoRadiusQuery struct { + Radius float64 + // Can be m, km, ft, or mi. Default is km. + Unit string + WithCoord bool + WithDist bool + WithGeoHash bool + Count int + // Can be ASC or DESC. Default is no sort order. 
+ Sort string + Store string + StoreDist string +} + +type GeoLocationCmd struct { + baseCmd + + q *GeoRadiusQuery + locations []GeoLocation +} + +var _ Cmder = (*GeoLocationCmd)(nil) + +func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { + return &GeoLocationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: geoLocationArgs(q, args...), + }, + q: q, + } +} + +func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} { + args = append(args, q.Radius) + if q.Unit != "" { + args = append(args, q.Unit) + } else { + args = append(args, "km") + } + if q.WithCoord { + args = append(args, "withcoord") + } + if q.WithDist { + args = append(args, "withdist") + } + if q.WithGeoHash { + args = append(args, "withhash") + } + if q.Count > 0 { + args = append(args, "count", q.Count) + } + if q.Sort != "" { + args = append(args, q.Sort) + } + if q.Store != "" { + args = append(args, "store") + args = append(args, q.Store) + } + if q.StoreDist != "" { + args = append(args, "storedist") + args = append(args, q.StoreDist) + } + return args +} + +func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) { + cmd.locations = locations +} + +func (cmd *GeoLocationCmd) Val() []GeoLocation { + return cmd.locations +} + +func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { + return cmd.locations, cmd.err +} + +func (cmd *GeoLocationCmd) String() string { + return cmdString(cmd, cmd.locations) +} + +func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error { + v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q)) + if err != nil { + return err + } + cmd.locations = v.([]GeoLocation) + return nil +} + +func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + locs := make([]GeoLocation, 0, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(newGeoLocationParser(q)) + if err != nil { + return nil, err + } + switch vv := v.(type) { + case string: + locs = append(locs, GeoLocation{ + Name: vv, + }) + case *GeoLocation: + // TODO: avoid copying + locs = append(locs, *vv) + default: + return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v) + } + } + return locs, nil + } +} + +func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + var loc GeoLocation + var err error + + loc.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + if q.WithDist { + loc.Dist, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + if q.WithGeoHash { + loc.GeoHash, err = rd.ReadIntReply() + if err != nil { + return nil, err + } + } + if q.WithCoord { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n != 2 { + return nil, fmt.Errorf("got %d coordinates, expected 2", n) + } + + loc.Longitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + loc.Latitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + + return &loc, nil + } +} + +//------------------------------------------------------------------------------ + +// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query. +type GeoSearchQuery struct { + Member string + + // Latitude and Longitude when using FromLonLat option. + Longitude float64 + Latitude float64 + + // Distance and unit when using ByRadius option. + // Can use m, km, ft, or mi. Default is km. 
+ Radius float64 + RadiusUnit string + + // Height, width and unit when using ByBox option. + // Can be m, km, ft, or mi. Default is km. + BoxWidth float64 + BoxHeight float64 + BoxUnit string + + // Can be ASC or DESC. Default is no sort order. + Sort string + Count int + CountAny bool +} + +type GeoSearchLocationQuery struct { + GeoSearchQuery + + WithCoord bool + WithDist bool + WithHash bool +} + +type GeoSearchStoreQuery struct { + GeoSearchQuery + + // When using the StoreDist option, the command stores the items in a + // sorted set populated with their distance from the center of the circle or box, + // as a floating-point number, in the same unit specified for that shape. + StoreDist bool +} + +func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} { + args = geoSearchArgs(&q.GeoSearchQuery, args) + + if q.WithCoord { + args = append(args, "withcoord") + } + if q.WithDist { + args = append(args, "withdist") + } + if q.WithHash { + args = append(args, "withhash") + } + + return args +} + +func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} { + if q.Member != "" { + args = append(args, "frommember", q.Member) + } else { + args = append(args, "fromlonlat", q.Longitude, q.Latitude) + } + + if q.Radius > 0 { + if q.RadiusUnit == "" { + q.RadiusUnit = "km" + } + args = append(args, "byradius", q.Radius, q.RadiusUnit) + } else { + if q.BoxUnit == "" { + q.BoxUnit = "km" + } + args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit) + } + + if q.Sort != "" { + args = append(args, q.Sort) + } + + if q.Count > 0 { + args = append(args, "count", q.Count) + if q.CountAny { + args = append(args, "any") + } + } + + return args +} + +type GeoSearchLocationCmd struct { + baseCmd + + opt *GeoSearchLocationQuery + val []GeoLocation +} + +var _ Cmder = (*GeoSearchLocationCmd)(nil) + +func NewGeoSearchLocationCmd( + ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{}, +) *GeoSearchLocationCmd { + return &GeoSearchLocationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + opt: opt, + } +} + +func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) { + cmd.val = val +} + +func (cmd *GeoSearchLocationCmd) Val() []GeoLocation { + return cmd.val +} + +func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) { + return cmd.val, cmd.err +} + +func (cmd *GeoSearchLocationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]GeoLocation, n) + for i := 0; i < n; i++ { + _, err = rd.ReadArrayLen() + if err != nil { + return err + } + + var loc GeoLocation + + loc.Name, err = rd.ReadString() + if err != nil { + return err + } + if cmd.opt.WithDist { + loc.Dist, err = rd.ReadFloatReply() + if err != nil { + return err + } + } + if cmd.opt.WithHash { + loc.GeoHash, err = rd.ReadIntReply() + if err != nil { + return err + } + } + if cmd.opt.WithCoord { + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + if nn != 2 { + return fmt.Errorf("got %d coordinates, expected 2", nn) + } + + loc.Longitude, err = rd.ReadFloatReply() + if err != nil { + return err + } + loc.Latitude, err = rd.ReadFloatReply() + if err != nil { + return err + } + } + + cmd.val[i] = loc + } + + return nil +} + +//------------------------------------------------------------------------------ + +type GeoPos struct { + Longitude, Latitude float64 +} + +type GeoPosCmd struct { + 
baseCmd + + val []*GeoPos +} + +var _ Cmder = (*GeoPosCmd)(nil) + +func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd { + return &GeoPosCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *GeoPosCmd) SetVal(val []*GeoPos) { + cmd.val = val +} + +func (cmd *GeoPosCmd) Val() []*GeoPos { + return cmd.val +} + +func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *GeoPosCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]*GeoPos, n) + for i := 0; i < len(cmd.val); i++ { + i := i + _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { + longitude, err := rd.ReadFloatReply() + if err != nil { + return nil, err + } + + latitude, err := rd.ReadFloatReply() + if err != nil { + return nil, err + } + + cmd.val[i] = &GeoPos{ + Longitude: longitude, + Latitude: latitude, + } + return nil, nil + }) + if err != nil { + if err == Nil { + cmd.val[i] = nil + continue + } + return nil, err + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type CommandInfo struct { + Name string + Arity int8 + Flags []string + ACLFlags []string + FirstKeyPos int8 + LastKeyPos int8 + StepCount int8 + ReadOnly bool +} + +type CommandsInfoCmd struct { + baseCmd + + val map[string]*CommandInfo +} + +var _ Cmder = (*CommandsInfoCmd)(nil) + +func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd { + return &CommandsInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) { + cmd.val = val +} + +func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { + return cmd.val +} + +func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *CommandsInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]*CommandInfo, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(commandInfoParser) + if err != nil { + return nil, err + } + vv := v.(*CommandInfo) + cmd.val[vv.Name] = vv + } + return nil, nil + }) + return err +} + +func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) { + const numArgRedis5 = 6 + const numArgRedis6 = 7 + + switch n { + case numArgRedis5, numArgRedis6: + // continue + default: + return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n) + } + + var cmd CommandInfo + var err error + + cmd.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + + arity, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.Arity = int8(arity) + + _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.Flags = make([]string, n) + for i := 0; i < len(cmd.Flags); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.Flags[i] = "" + case err != nil: + return nil, err + default: + cmd.Flags[i] = s + } + } + return nil, nil + }) + if err != nil { + return nil, err + } + + firstKeyPos, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.FirstKeyPos = int8(firstKeyPos) + + lastKeyPos, err := 
rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.LastKeyPos = int8(lastKeyPos) + + stepCount, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.StepCount = int8(stepCount) + + for _, flag := range cmd.Flags { + if flag == "readonly" { + cmd.ReadOnly = true + break + } + } + + if n == numArgRedis5 { + return &cmd, nil + } + + _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.ACLFlags = make([]string, n) + for i := 0; i < len(cmd.ACLFlags); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.ACLFlags[i] = "" + case err != nil: + return nil, err + default: + cmd.ACLFlags[i] = s + } + } + return nil, nil + }) + if err != nil { + return nil, err + } + + return &cmd, nil +} + +//------------------------------------------------------------------------------ + +type cmdsInfoCache struct { + fn func(ctx context.Context) (map[string]*CommandInfo, error) + + once internal.Once + cmds map[string]*CommandInfo +} + +func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache { + return &cmdsInfoCache{ + fn: fn, + } +} + +func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) { + err := c.once.Do(func() error { + cmds, err := c.fn(ctx) + if err != nil { + return err + } + + // Extensions have cmd names in upper case. Convert them to lower case. + for k, v := range cmds { + lower := internal.ToLower(k) + if lower != k { + cmds[lower] = v + } + } + + c.cmds = cmds + return nil + }) + return c.cmds, err +} + +//------------------------------------------------------------------------------ + +type SlowLog struct { + ID int64 + Time time.Time + Duration time.Duration + Args []string + // These are also optional fields emitted only by Redis 4.0 or greater: + // https://redis.io/commands/slowlog#output-format + ClientAddr string + ClientName string +} + +type SlowLogCmd struct { + baseCmd + + val []SlowLog +} + +var _ Cmder = (*SlowLogCmd)(nil) + +func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd { + return &SlowLogCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *SlowLogCmd) SetVal(val []SlowLog) { + cmd.val = val +} + +func (cmd *SlowLogCmd) Val() []SlowLog { + return cmd.val +} + +func (cmd *SlowLogCmd) Result() ([]SlowLog, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *SlowLogCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]SlowLog, n) + for i := 0; i < len(cmd.val); i++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n < 4 { + err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n) + return nil, err + } + + id, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + createdAt, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + createdAtTime := time.Unix(createdAt, 0) + + costs, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + costsDuration := time.Duration(costs) * time.Microsecond + + cmdLen, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if cmdLen < 1 { + err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen) + return nil, err + } + + cmdString := make([]string, cmdLen) + for i := 0; i < cmdLen; i++ { + cmdString[i], err = rd.ReadString() + if err != nil { 
+ return nil, err + } + } + + var address, name string + for i := 4; i < n; i++ { + str, err := rd.ReadString() + if err != nil { + return nil, err + } + if i == 4 { + address = str + } else if i == 5 { + name = str + } + } + + cmd.val[i] = SlowLog{ + ID: id, + Time: createdAtTime, + Duration: costsDuration, + Args: cmdString, + ClientAddr: address, + ClientName: name, + } + } + return nil, nil + }) + return err +} diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go new file mode 100644 index 00000000..bbfe089d --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/commands.go @@ -0,0 +1,3475 @@ +package redis + +import ( + "context" + "errors" + "io" + "time" + + "github.com/go-redis/redis/v8/internal" +) + +// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. +// For example: +// +// rdb.Set(ctx, key, value, redis.KeepTTL) +const KeepTTL = -1 + +func usePrecise(dur time.Duration) bool { + return dur < time.Second || dur%time.Second != 0 +} + +func formatMs(ctx context.Context, dur time.Duration) int64 { + if dur > 0 && dur < time.Millisecond { + internal.Logger.Printf( + ctx, + "specified duration is %s, but minimal supported value is %s - truncating to 1ms", + dur, time.Millisecond, + ) + return 1 + } + return int64(dur / time.Millisecond) +} + +func formatSec(ctx context.Context, dur time.Duration) int64 { + if dur > 0 && dur < time.Second { + internal.Logger.Printf( + ctx, + "specified duration is %s, but minimal supported value is %s - truncating to 1s", + dur, time.Second, + ) + return 1 + } + return int64(dur / time.Second) +} + +func appendArgs(dst, src []interface{}) []interface{} { + if len(src) == 1 { + return appendArg(dst, src[0]) + } + + dst = append(dst, src...) + return dst +} + +func appendArg(dst []interface{}, arg interface{}) []interface{} { + switch arg := arg.(type) { + case []string: + for _, s := range arg { + dst = append(dst, s) + } + return dst + case []interface{}: + dst = append(dst, arg...) 
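// Composite arguments are flattened here: the slice cases spread their
// elements and the map cases below emit alternating key/value pairs, which
// is how variadic commands such as MSet and HSet accept slices and maps.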
+ return dst + case map[string]interface{}: + for k, v := range arg { + dst = append(dst, k, v) + } + return dst + case map[string]string: + for k, v := range arg { + dst = append(dst, k, v) + } + return dst + default: + return append(dst, arg) + } +} + +type Cmdable interface { + Pipeline() Pipeliner + Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) + + TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) + TxPipeline() Pipeliner + + Command(ctx context.Context) *CommandsInfoCmd + ClientGetName(ctx context.Context) *StringCmd + Echo(ctx context.Context, message interface{}) *StringCmd + Ping(ctx context.Context) *StatusCmd + Quit(ctx context.Context) *StatusCmd + Del(ctx context.Context, keys ...string) *IntCmd + Unlink(ctx context.Context, keys ...string) *IntCmd + Dump(ctx context.Context, key string) *StringCmd + Exists(ctx context.Context, keys ...string) *IntCmd + Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd + Keys(ctx context.Context, pattern string) *StringSliceCmd + Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd + Move(ctx context.Context, key string, db int) *BoolCmd + ObjectRefCount(ctx context.Context, key string) *IntCmd + ObjectEncoding(ctx context.Context, key string) *StringCmd + ObjectIdleTime(ctx context.Context, key string) *DurationCmd + Persist(ctx context.Context, key string) *BoolCmd + PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd + PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + PTTL(ctx context.Context, key string) *DurationCmd + RandomKey(ctx context.Context) *StringCmd + Rename(ctx context.Context, key, newkey string) *StatusCmd + RenameNX(ctx context.Context, key, newkey string) *BoolCmd + Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd + RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd + Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd + SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd + SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd + Touch(ctx context.Context, keys ...string) *IntCmd + TTL(ctx context.Context, key string) *DurationCmd + Type(ctx context.Context, key string) *StatusCmd + Append(ctx context.Context, key, value string) *IntCmd + Decr(ctx context.Context, key string) *IntCmd + DecrBy(ctx context.Context, key string, decrement int64) *IntCmd + Get(ctx context.Context, key string) *StringCmd + GetRange(ctx context.Context, key string, start, end int64) *StringCmd + GetSet(ctx context.Context, key string, value interface{}) *StringCmd + GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd + GetDel(ctx context.Context, key string) *StringCmd + Incr(ctx context.Context, key string) *IntCmd + IncrBy(ctx context.Context, key string, value int64) *IntCmd + IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd + MGet(ctx context.Context, keys ...string) *SliceCmd + MSet(ctx context.Context, values ...interface{}) *StatusCmd + 
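// MSet accepts its values in several flattened forms (courtesy of appendArg
// above), for example (a sketch, assuming a *redis.Client named rdb):
//
//	rdb.MSet(ctx, "key1", "value1", "key2", "value2")
//	rdb.MSet(ctx, []string{"key1", "value1", "key2", "value2"})
//	rdb.MSet(ctx, map[string]interface{}{"key1": "value1", "key2": "value2"})
//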
MSetNX(ctx context.Context, values ...interface{}) *BoolCmd + Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd + SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd + // TODO: rename to SetEx + SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd + SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd + SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd + SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd + StrLen(ctx context.Context, key string) *IntCmd + Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd + + GetBit(ctx context.Context, key string, offset int64) *IntCmd + SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd + BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd + BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpNot(ctx context.Context, destKey string, key string) *IntCmd + BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd + BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd + + Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd + ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd + SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + + HDel(ctx context.Context, key string, fields ...string) *IntCmd + HExists(ctx context.Context, key, field string) *BoolCmd + HGet(ctx context.Context, key, field string) *StringCmd + HGetAll(ctx context.Context, key string) *StringStringMapCmd + HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd + HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd + HKeys(ctx context.Context, key string) *StringSliceCmd + HLen(ctx context.Context, key string) *IntCmd + HMGet(ctx context.Context, key string, fields ...string) *SliceCmd + HSet(ctx context.Context, key string, values ...interface{}) *IntCmd + HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd + HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd + HVals(ctx context.Context, key string) *StringSliceCmd + HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd + + BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd + BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd + BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd + LIndex(ctx context.Context, key string, index int64) *StringCmd + LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd + LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd + LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd + LLen(ctx context.Context, key string) *IntCmd + LPop(ctx context.Context, key string) *StringCmd + LPopCount(ctx context.Context, key string, count int) 
*StringSliceCmd + LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd + LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd + LPush(ctx context.Context, key string, values ...interface{}) *IntCmd + LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd + LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd + LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd + LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd + RPop(ctx context.Context, key string) *StringCmd + RPopCount(ctx context.Context, key string, count int) *StringSliceCmd + RPopLPush(ctx context.Context, source, destination string) *StringCmd + RPush(ctx context.Context, key string, values ...interface{}) *IntCmd + RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd + LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd + BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd + + SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd + SCard(ctx context.Context, key string) *IntCmd + SDiff(ctx context.Context, keys ...string) *StringSliceCmd + SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd + SInter(ctx context.Context, keys ...string) *StringSliceCmd + SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd + SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd + SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd + SMembers(ctx context.Context, key string) *StringSliceCmd + SMembersMap(ctx context.Context, key string) *StringStructMapCmd + SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd + SPop(ctx context.Context, key string) *StringCmd + SPopN(ctx context.Context, key string, count int64) *StringSliceCmd + SRandMember(ctx context.Context, key string) *StringCmd + SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd + SRem(ctx context.Context, key string, members ...interface{}) *IntCmd + SUnion(ctx context.Context, keys ...string) *StringSliceCmd + SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd + + XAdd(ctx context.Context, a *XAddArgs) *StringCmd + XDel(ctx context.Context, stream string, ids ...string) *IntCmd + XLen(ctx context.Context, stream string) *IntCmd + XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd + XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd + XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd + XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd + XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd + XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd + XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd + XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd + XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd + XGroupDestroy(ctx context.Context, stream, group string) *IntCmd + XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd + XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd + 
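// A typical consumer-group flow over the stream methods above and below
// (a sketch; the stream, group, consumer and message ID are placeholders):
//
//	rdb.XGroupCreateMkStream(ctx, "stream", "group", "$")
//	streams, err := rdb.XReadGroup(ctx, &redis.XReadGroupArgs{
//		Group:    "group",
//		Consumer: "consumer-1",
//		Streams:  []string{"stream", ">"},
//		Count:    10,
//	}).Result()
//	// ... handle streams, then acknowledge processed entries ...
//	rdb.XAck(ctx, "stream", "group", "1526569495631-0")
//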
XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd + XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd + XPending(ctx context.Context, stream, group string) *XPendingCmd + XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd + XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd + XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd + XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd + XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd + + // TODO: XTrim and XTrimApprox remove in v9. + XTrim(ctx context.Context, key string, maxLen int64) *IntCmd + XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd + XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd + XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd + XTrimMinID(ctx context.Context, key string, minID string) *IntCmd + XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd + XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd + XInfoStream(ctx context.Context, key string) *XInfoStreamCmd + XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd + XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd + + BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd + BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd + + // TODO: remove + // ZAddCh + // ZIncr + // ZAddNXCh + // ZAddXXCh + // ZIncrNX + // ZIncrXX + // in v9. + // use ZAddArgs and ZAddArgsIncr. + + ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd + ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd + ZIncr(ctx context.Context, key string, member *Z) *FloatCmd + ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd + ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd + ZCard(ctx context.Context, key string) *IntCmd + ZCount(ctx context.Context, key, min, max string) *IntCmd + ZLexCount(ctx context.Context, key, min, max string) *IntCmd + ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd + ZInter(ctx context.Context, store *ZStore) *StringSliceCmd + ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd + ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd + ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd + ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd + ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd + ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd + ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd + ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd + ZRangeArgsWithScores(ctx 
context.Context, z ZRangeArgs) *ZSliceCmd + ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd + ZRank(ctx context.Context, key, member string) *IntCmd + ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd + ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd + ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd + ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd + ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd + ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd + ZRevRank(ctx context.Context, key, member string) *IntCmd + ZScore(ctx context.Context, key, member string) *FloatCmd + ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd + ZUnion(ctx context.Context, store ZStore) *StringSliceCmd + ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd + ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd + ZDiff(ctx context.Context, keys ...string) *StringSliceCmd + ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd + ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd + + PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd + PFCount(ctx context.Context, keys ...string) *IntCmd + PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd + + BgRewriteAOF(ctx context.Context) *StatusCmd + BgSave(ctx context.Context) *StatusCmd + ClientKill(ctx context.Context, ipPort string) *StatusCmd + ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd + ClientList(ctx context.Context) *StringCmd + ClientPause(ctx context.Context, dur time.Duration) *BoolCmd + ClientID(ctx context.Context) *IntCmd + ConfigGet(ctx context.Context, parameter string) *SliceCmd + ConfigResetStat(ctx context.Context) *StatusCmd + ConfigSet(ctx context.Context, parameter, value string) *StatusCmd + ConfigRewrite(ctx context.Context) *StatusCmd + DBSize(ctx context.Context) *IntCmd + FlushAll(ctx context.Context) *StatusCmd + FlushAllAsync(ctx context.Context) *StatusCmd + FlushDB(ctx context.Context) *StatusCmd + FlushDBAsync(ctx context.Context) *StatusCmd + Info(ctx context.Context, section ...string) *StringCmd + LastSave(ctx context.Context) *IntCmd + Save(ctx context.Context) *StatusCmd + Shutdown(ctx context.Context) *StatusCmd + ShutdownSave(ctx context.Context) *StatusCmd + ShutdownNoSave(ctx context.Context) *StatusCmd + SlaveOf(ctx context.Context, host, port string) *StatusCmd + Time(ctx context.Context) *TimeCmd + DebugObject(ctx context.Context, key string) *StringCmd + ReadOnly(ctx context.Context) *StatusCmd + ReadWrite(ctx context.Context) *StatusCmd + MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd + + Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd + EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd + ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd + ScriptFlush(ctx context.Context) *StatusCmd + ScriptKill(ctx context.Context) *StatusCmd + ScriptLoad(ctx context.Context, script string) *StringCmd + + Publish(ctx context.Context, channel string, message interface{}) *IntCmd + 
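// Publish example (a sketch; receiving messages requires a *redis.PubSub
// subscription, which is outside this interface):
//
//	receivers, err := rdb.Publish(ctx, "events", "payload").Result()
//	// receivers is the number of subscribers that received the message
//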
PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd + PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd + PubSubNumPat(ctx context.Context) *IntCmd + + ClusterSlots(ctx context.Context) *ClusterSlotsCmd + ClusterNodes(ctx context.Context) *StringCmd + ClusterMeet(ctx context.Context, host, port string) *StatusCmd + ClusterForget(ctx context.Context, nodeID string) *StatusCmd + ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd + ClusterResetSoft(ctx context.Context) *StatusCmd + ClusterResetHard(ctx context.Context) *StatusCmd + ClusterInfo(ctx context.Context) *StringCmd + ClusterKeySlot(ctx context.Context, key string) *IntCmd + ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd + ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd + ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd + ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd + ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd + ClusterSaveConfig(ctx context.Context) *StatusCmd + ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd + ClusterFailover(ctx context.Context) *StatusCmd + ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd + ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd + + GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd + GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd + GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd + GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd + GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd + GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd + GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd + GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd + GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd +} + +type StatefulCmdable interface { + Cmdable + Auth(ctx context.Context, password string) *StatusCmd + AuthACL(ctx context.Context, username, password string) *StatusCmd + Select(ctx context.Context, index int) *StatusCmd + SwapDB(ctx context.Context, index1, index2 int) *StatusCmd + ClientSetName(ctx context.Context, name string) *BoolCmd +} + +var ( + _ Cmdable = (*Client)(nil) + _ Cmdable = (*Tx)(nil) + _ Cmdable = (*Ring)(nil) + _ Cmdable = (*ClusterClient)(nil) +) + +type cmdable func(ctx context.Context, cmd Cmder) error + +type statefulCmdable func(ctx context.Context, cmd Cmder) error + +//------------------------------------------------------------------------------ + +func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd { + cmd := NewStatusCmd(ctx, "auth", password) + _ = c(ctx, cmd) + return cmd +} + +// AuthACL Perform an AUTH command, using the given user and pass. +// Should be used to authenticate the current connection with one of the connections defined in the ACL list +// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system. 
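// For example (a sketch; in practice the username and password usually come
// from Options and auth is issued automatically on connection setup):
//
//	statusCmd := c.AuthACL(ctx, "app-user", "app-password")
//	err := statusCmd.Err()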
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd { + cmd := NewStatusCmd(ctx, "auth", username, password) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd { + cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond)) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd { + cmd := NewStatusCmd(ctx, "select", index) + _ = c(ctx, cmd) + return cmd +} + +func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd { + cmd := NewStatusCmd(ctx, "swapdb", index1, index2) + _ = c(ctx, cmd) + return cmd +} + +// ClientSetName assigns a name to the connection. +func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd { + cmd := NewBoolCmd(ctx, "client", "setname", name) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd { + cmd := NewCommandsInfoCmd(ctx, "command") + _ = c(ctx, cmd) + return cmd +} + +// ClientGetName returns the name of the connection. +func (c cmdable) ClientGetName(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "client", "getname") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd { + cmd := NewStringCmd(ctx, "echo", message) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Ping(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "ping") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Quit(_ context.Context) *StatusCmd { + panic("not implemented") +} + +func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "del" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "unlink" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Dump(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "dump", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "exists" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) 
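// Executing via the cmdable func records any error on the command itself,
// so the returned error is deliberately discarded here; callers read it
// back through cmd.Err() or cmd.Result().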
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "") +} + +func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "NX") +} + +func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "XX") +} + +func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "GT") +} + +func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "LT") +} + +func (c cmdable) expire( + ctx context.Context, key string, expiration time.Duration, mode string, +) *BoolCmd { + args := make([]interface{}, 3, 4) + args[0] = "expire" + args[1] = key + args[2] = formatSec(ctx, expiration) + if mode != "" { + args = append(args, mode) + } + + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix()) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "keys", pattern) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "migrate", + host, + port, + key, + db, + formatMs(ctx, timeout), + ) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd { + cmd := NewBoolCmd(ctx, "move", key, db) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "object", "refcount", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "object", "encoding", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd { + cmd := NewBoolCmd(ctx, "persist", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration)) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd( + ctx, + "pexpireat", + key, + tm.UnixNano()/int64(time.Millisecond), + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RandomKey(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "randomkey") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd { + cmd := NewStatusCmd(ctx, "rename", key, newkey) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd { + cmd := NewBoolCmd(ctx, 
"renamenx", key, newkey) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "restore", + key, + formatMs(ctx, ttl), + value, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "restore", + key, + formatMs(ctx, ttl), + value, + "replace", + ) + _ = c(ctx, cmd) + return cmd +} + +type Sort struct { + By string + Offset, Count int64 + Get []string + Order string + Alpha bool +} + +func (sort *Sort) args(key string) []interface{} { + args := []interface{}{"sort", key} + if sort.By != "" { + args = append(args, "by", sort.By) + } + if sort.Offset != 0 || sort.Count != 0 { + args = append(args, "limit", sort.Offset, sort.Count) + } + for _, get := range sort.Get { + args = append(args, "get", get) + } + if sort.Order != "" { + args = append(args, sort.Order) + } + if sort.Alpha { + args = append(args, "alpha") + } + return args +} + +func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, sort.args(key)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd { + args := sort.args(key) + if store != "" { + args = append(args, "store", store) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd { + cmd := NewSliceCmd(ctx, sort.args(key)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, len(keys)+1) + args[0] = "touch" + for i, key := range keys { + args[i+1] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Second, "ttl", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Type(ctx context.Context, key string) *StatusCmd { + cmd := NewStatusCmd(ctx, "type", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd { + cmd := NewIntCmd(ctx, "append", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Decr(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "decr", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd { + cmd := NewIntCmd(ctx, "decrby", key, decrement) + _ = c(ctx, cmd) + return cmd +} + +// Get Redis `GET key` command. It returns redis.Nil error when key does not exist. +func (c cmdable) Get(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "get", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd { + cmd := NewStringCmd(ctx, "getrange", key, start, end) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd { + cmd := NewStringCmd(ctx, "getset", key, value) + _ = c(ctx, cmd) + return cmd +} + +// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist). +// Requires Redis >= 6.2.0. 
+func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd { + args := make([]interface{}, 0, 4) + args = append(args, "getex", key) + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == 0 { + args = append(args, "persist") + } + + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// GetDel redis-server version >= 6.2.0. +func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "getdel", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Incr(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "incr", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd { + cmd := NewIntCmd(ctx, "incrby", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd { + cmd := NewFloatCmd(ctx, "incrbyfloat", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "mget" + for i, key := range keys { + args[1+i] = key + } + cmd := NewSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// MSet is like Set but accepts multiple values: +// - MSet("key1", "value1", "key2", "value2") +// - MSet([]string{"key1", "value1", "key2", "value2"}) +// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"}) +func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd { + args := make([]interface{}, 1, 1+len(values)) + args[0] = "mset" + args = appendArgs(args, values) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// MSetNX is like SetNX but accepts multiple values: +// - MSetNX("key1", "value1", "key2", "value2") +// - MSetNX([]string{"key1", "value1", "key2", "value2"}) +// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"}) +func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd { + args := make([]interface{}, 1, 1+len(values)) + args[0] = "msetnx" + args = appendArgs(args, values) + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Set Redis `SET key value [expiration]` command. +// Use expiration for `SETEX`-like behavior. +// +// Zero expiration means the key has no expiration time. +// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. +func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { + args := make([]interface{}, 3, 5) + args[0] = "set" + args[1] = key + args[2] = value + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetArgs provides arguments for the SetArgs function. +type SetArgs struct { + // Mode can be `NX` or `XX` or empty. + Mode string + + // Zero `TTL` or `Expiration` means that the key has no expiration time. 
+ TTL time.Duration + ExpireAt time.Time + + // When Get is true, the command returns the old value stored at key, or nil when key did not exist. + Get bool + + // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, + // otherwise you will receive an error: (error) ERR syntax error. + KeepTTL bool +} + +// SetArgs supports all the options that the SET command supports. +// It is the alternative to the Set function when you want +// to have more control over the options. +func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd { + args := []interface{}{"set", key, value} + + if a.KeepTTL { + args = append(args, "keepttl") + } + + if !a.ExpireAt.IsZero() { + args = append(args, "exat", a.ExpireAt.Unix()) + } + if a.TTL > 0 { + if usePrecise(a.TTL) { + args = append(args, "px", formatMs(ctx, a.TTL)) + } else { + args = append(args, "ex", formatSec(ctx, a.TTL)) + } + } + + if a.Mode != "" { + args = append(args, a.Mode) + } + + if a.Get { + args = append(args, "get") + } + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetEX Redis `SETEX key expiration value` command. +func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { + cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value) + _ = c(ctx, cmd) + return cmd +} + +// SetNX Redis `SET key value [expiration] NX` command. +// +// Zero expiration means the key has no expiration time. +// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. +func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + switch expiration { + case 0: + // Use old `SETNX` to support old Redis versions. + cmd = NewBoolCmd(ctx, "setnx", key, value) + case KeepTTL: + cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx") + default: + if usePrecise(expiration) { + cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx") + } else { + cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx") + } + } + + _ = c(ctx, cmd) + return cmd +} + +// SetXX Redis `SET key value [expiration] XX` command. +// +// Zero expiration means the key has no expiration time. +// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. 
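The SetArgs struct above folds the whole SET option surface into one call; a hedged usage sketch under the same assumptions (local Redis >= 6.2, go-redis v8, invented key and value):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed local server

	// SET example-key v2 EX 60 XX GET: update only if the key already exists,
	// give it a 60-second TTL, and return the previous value (Redis >= 6.2).
	prev, err := rdb.SetArgs(ctx, "example-key", "v2", redis.SetArgs{
		Mode: "XX",
		TTL:  60 * time.Second,
		Get:  true,
	}).Result()
	switch {
	case err == redis.Nil:
		fmt.Println("key was absent, nothing was set")
	case err != nil:
		fmt.Println("set failed:", err)
	default:
		fmt.Println("previous value:", prev)
	}
}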
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + switch expiration { + case 0: + cmd = NewBoolCmd(ctx, "set", key, value, "xx") + case KeepTTL: + cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx") + default: + if usePrecise(expiration) { + cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx") + } else { + cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx") + } + } + + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd { + cmd := NewIntCmd(ctx, "setrange", key, offset, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "strlen", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd { + args := []interface{}{"copy", sourceKey, destKey, "DB", db} + if replace { + args = append(args, "REPLACE") + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd { + cmd := NewIntCmd(ctx, "getbit", key, offset) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd { + cmd := NewIntCmd( + ctx, + "setbit", + key, + offset, + value, + ) + _ = c(ctx, cmd) + return cmd +} + +type BitCount struct { + Start, End int64 +} + +func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd { + args := []interface{}{"bitcount", key} + if bitCount != nil { + args = append( + args, + bitCount.Start, + bitCount.End, + ) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "bitop" + args[1] = op + args[2] = destKey + for i, key := range keys { + args[3+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "and", destKey, keys...) +} + +func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "or", destKey, keys...) +} + +func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "xor", destKey, keys...) +} + +func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd { + return c.bitOp(ctx, "not", destKey, key) +} + +func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd { + args := make([]interface{}, 3+len(pos)) + args[0] = "bitpos" + args[1] = key + args[2] = bit + switch len(pos) { + case 0: + case 1: + args[3] = pos[0] + case 2: + args[3] = pos[0] + args[4] = pos[1] + default: + panic("too many arguments") + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd { + a := make([]interface{}, 0, 2+len(args)) + a = append(a, "bitfield") + a = append(a, key) + a = append(a, args...) + cmd := NewIntSliceCmd(ctx, a...) 
+ _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"scan", cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd { + args := []interface{}{"scan", cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + if keyType != "" { + args = append(args, "type", keyType) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"sscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"hscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"zscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hdel" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd { + cmd := NewBoolCmd(ctx, "hexists", key, field) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd { + cmd := NewStringCmd(ctx, "hget", key, field) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd { + cmd := NewStringStringMapCmd(ctx, "hgetall", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd { + cmd := NewIntCmd(ctx, "hincrby", key, field, incr) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd { + cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "hkeys", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HLen(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "hlen", key) + _ = c(ctx, cmd) + return cmd +} + +// HMGet returns the values for the specified fields in the hash stored at key. 
+// It returns an interface{} to distinguish between empty string and nil value. +func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hmget" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HSet accepts values in following formats: +// - HSet("myhash", "key1", "value1", "key2", "value2") +// - HSet("myhash", []string{"key1", "value1", "key2", "value2"}) +// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}) +// +// Note that it requires Redis v4 for multiple field/value pairs support. +func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "hset" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HMSet is a deprecated version of HSet left for compatibility with Redis 3. +func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "hmset" + args[1] = key + args = appendArgs(args, values) + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd { + cmd := NewBoolCmd(ctx, "hsetnx", key, field, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "hvals", key) + _ = c(ctx, cmd) + return cmd +} + +// HRandField redis-server version >= 6.2.0. +func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd { + args := make([]interface{}, 0, 4) + + // Although count=0 is meaningless, redis accepts count=0. + args = append(args, "hrandfield", key, count) + if withValues { + args = append(args, "withvalues") + } + + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "blpop" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(ctx, timeout) + cmd := NewStringSliceCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "brpop" + for i, key := range keys { + args[1+i] = key + } + args[len(keys)+1] = formatSec(ctx, timeout) + cmd := NewStringSliceCmd(ctx, args...) 
+ cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd { + cmd := NewStringCmd( + ctx, + "brpoplpush", + source, + destination, + formatSec(ctx, timeout), + ) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd { + cmd := NewStringCmd(ctx, "lindex", key, index) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LLen(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "llen", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPop(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "lpop", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "lpop", key, count) + _ = c(ctx, cmd) + return cmd +} + +type LPosArgs struct { + Rank, MaxLen int64 +} + +func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd { + args := []interface{}{"lpos", key, value} + if a.Rank != 0 { + args = append(args, "rank", a.Rank) + } + if a.MaxLen != 0 { + args = append(args, "maxlen", a.MaxLen) + } + + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd { + args := []interface{}{"lpos", key, value, "count", count} + if a.Rank != 0 { + args = append(args, "rank", a.Rank) + } + if a.MaxLen != 0 { + args = append(args, "maxlen", a.MaxLen) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "lpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "lpushx" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd( + ctx, + "lrange", + key, + start, + stop, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "lrem", key, count, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd { + cmd := NewStatusCmd(ctx, "lset", key, index, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "ltrim", + key, + start, + stop, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPop(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "rpop", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "rpop", key, count) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd { + cmd := NewStringCmd(ctx, "rpoplpush", source, destination) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "rpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "rpushx" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd { + cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BLMove( + ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration, +) *StringCmd { + cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout)) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "sadd" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SCard(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "scard", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sdiff" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sdiffstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sinter" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sinterstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd { + cmd := NewBoolCmd(ctx, "sismember", key, member) + _ = c(ctx, cmd) + return cmd +} + +// SMIsMember Redis `SMISMEMBER key member [member ...]` command. +func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "smismember" + args[1] = key + args = appendArgs(args, members) + cmd := NewBoolSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SMembers Redis `SMEMBERS key` command output as a slice. +func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "smembers", key) + _ = c(ctx, cmd) + return cmd +} + +// SMembersMap Redis `SMEMBERS key` command output as a map. +func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd { + cmd := NewStringStructMapCmd(ctx, "smembers", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd { + cmd := NewBoolCmd(ctx, "smove", source, destination, member) + _ = c(ctx, cmd) + return cmd +} + +// SPop Redis `SPOP key` command. +func (c cmdable) SPop(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "spop", key) + _ = c(ctx, cmd) + return cmd +} + +// SPopN Redis `SPOP key count` command. +func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "spop", key, count) + _ = c(ctx, cmd) + return cmd +} + +// SRandMember Redis `SRANDMEMBER key` command. +func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "srandmember", key) + _ = c(ctx, cmd) + return cmd +} + +// SRandMemberN Redis `SRANDMEMBER key count` command. +func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "srandmember", key, count) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "srem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sunion" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sunionstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +// XAddArgs accepts values in the following formats: +// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"} +// - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"} +// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"} +// +// Note that map will not preserve the order of key-value pairs. +// MaxLen/MaxLenApprox and MinID are mutually exclusive; only one of them can be used. +type XAddArgs struct { + Stream string + NoMkStream bool + MaxLen int64 // MAXLEN N + + // Deprecated: use MaxLen+Approx, remove in v9. + MaxLenApprox int64 // MAXLEN ~ N + + MinID string + // Approx causes MaxLen and MinID to use "~" matcher (instead of "="). + Approx bool + Limit int64 + ID string + Values interface{} +} + +// XAdd: a.Limit is affected by a known redis-server bug; verify the behavior on your server before relying on it. +// issue: https://github.com/redis/redis/issues/9046 +func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd { + args := make([]interface{}, 0, 11) + args = append(args, "xadd", a.Stream) + if a.NoMkStream { + args = append(args, "nomkstream") + } + switch { + case a.MaxLen > 0: + if a.Approx { + args = append(args, "maxlen", "~", a.MaxLen) + } else { + args = append(args, "maxlen", a.MaxLen) + } + case a.MaxLenApprox > 0: + // TODO remove in v9. + args = append(args, "maxlen", "~", a.MaxLenApprox) + case a.MinID != "": + if a.Approx { + args = append(args, "minid", "~", a.MinID) + } else { + args = append(args, "minid", a.MinID) + } + } + if a.Limit > 0 { + args = append(args, "limit", a.Limit) + } + if a.ID != "" { + args = append(args, a.ID) + } else { + args = append(args, "*") + } + args = appendArg(args, a.Values) + + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd { + args := []interface{}{"xdel", stream} + for _, id := range ids { + args = append(args, id) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd { + cmd := NewIntCmd(ctx, "xlen", stream) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count) + _ = c(ctx, cmd) + return cmd +} + +type XReadArgs struct { + Streams []string // list of streams and ids, e.g.
stream1 stream2 id1 id2 + Count int64 + Block time.Duration +} + +func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 6+len(a.Streams)) + args = append(args, "xread") + + keyPos := int8(1) + if a.Count > 0 { + args = append(args, "count") + args = append(args, a.Count) + keyPos += 2 + } + if a.Block >= 0 { + args = append(args, "block") + args = append(args, int64(a.Block/time.Millisecond)) + keyPos += 2 + } + args = append(args, "streams") + keyPos++ + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(ctx, args...) + if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + cmd.SetFirstKeyPos(keyPos) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd { + return c.XRead(ctx, &XReadArgs{ + Streams: streams, + Block: -1, + }) +} + +func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd { + cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd { + cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd { + cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd { + cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { + cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { + cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer) + _ = c(ctx, cmd) + return cmd +} + +type XReadGroupArgs struct { + Group string + Consumer string + Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2 + Count int64 + Block time.Duration + NoAck bool +} + +func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 10+len(a.Streams)) + args = append(args, "xreadgroup", "group", a.Group, a.Consumer) + + keyPos := int8(4) + if a.Count > 0 { + args = append(args, "count", a.Count) + keyPos += 2 + } + if a.Block >= 0 { + args = append(args, "block", int64(a.Block/time.Millisecond)) + keyPos += 2 + } + if a.NoAck { + args = append(args, "noack") + keyPos++ + } + args = append(args, "streams") + keyPos++ + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(ctx, args...) + if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + cmd.SetFirstKeyPos(keyPos) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd { + args := []interface{}{"xack", stream, group} + for _, id := range ids { + args = append(args, id) + } + cmd := NewIntCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd { + cmd := NewXPendingCmd(ctx, "xpending", stream, group) + _ = c(ctx, cmd) + return cmd +} + +type XPendingExtArgs struct { + Stream string + Group string + Idle time.Duration + Start string + End string + Count int64 + Consumer string +} + +func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd { + args := make([]interface{}, 0, 9) + args = append(args, "xpending", a.Stream, a.Group) + if a.Idle != 0 { + args = append(args, "idle", formatMs(ctx, a.Idle)) + } + args = append(args, a.Start, a.End, a.Count) + if a.Consumer != "" { + args = append(args, a.Consumer) + } + cmd := NewXPendingExtCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +type XAutoClaimArgs struct { + Stream string + Group string + MinIdle time.Duration + Start string + Count int64 + Consumer string +} + +func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd { + args := xAutoClaimArgs(ctx, a) + cmd := NewXAutoClaimCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd { + args := xAutoClaimArgs(ctx, a) + args = append(args, "justid") + cmd := NewXAutoClaimJustIDCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} { + args := make([]interface{}, 0, 8) + args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start) + if a.Count > 0 { + args = append(args, "count", a.Count) + } + return args +} + +type XClaimArgs struct { + Stream string + Group string + Consumer string + MinIdle time.Duration + Messages []string +} + +func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd { + args := xClaimArgs(a) + cmd := NewXMessageSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd { + args := xClaimArgs(a) + args = append(args, "justid") + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func xClaimArgs(a *XClaimArgs) []interface{} { + args := make([]interface{}, 0, 5+len(a.Messages)) + args = append(args, + "xclaim", + a.Stream, + a.Group, a.Consumer, + int64(a.MinIdle/time.Millisecond)) + for _, id := range a.Messages { + args = append(args, id) + } + return args +} + +// xTrim If approx is true, the "~" modifier is added; otherwise the exact "=" (redis default) is used. +// example: +// XTRIM key MAXLEN/MINID threshold LIMIT limit. +// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit. +// If the redis-server version is lower than 6.2, limit must be set to 0. +func (c cmdable) xTrim( + ctx context.Context, key, strategy string, + approx bool, threshold interface{}, limit int64, +) *IntCmd { + args := make([]interface{}, 0, 7) + args = append(args, "xtrim", key, strategy) + if approx { + args = append(args, "~") + } + args = append(args, threshold) + if limit > 0 { + args = append(args, "limit", limit) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Deprecated: use XTrimMaxLen, remove in v9. +func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd { + return c.xTrim(ctx, key, "maxlen", false, maxLen, 0) +} + +// Deprecated: use XTrimMaxLenApprox, remove in v9.
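Taken together, XAdd with Approx and the XTrim helpers below implement the usual capped-stream pattern; a short sketch under the same assumptions (local Redis >= 6.2, go-redis v8, invented stream name):

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed local server

	// XADD mystream MAXLEN ~ 1000 * field value: the "~" lets the server
	// trim lazily in whole macro-nodes, which is much cheaper than exact trims.
	id, err := rdb.XAdd(ctx, &redis.XAddArgs{
		Stream: "mystream",
		MaxLen: 1000,
		Approx: true,
		Values: map[string]interface{}{"field": "value"},
	}).Result()
	if err != nil {
		fmt.Println("xadd failed:", err)
		return
	}
	fmt.Println("added entry", id)

	// XTRIM mystream MAXLEN 500: exact trim, so no LIMIT clause is sent.
	trimmed, _ := rdb.XTrimMaxLen(ctx, "mystream", 500).Result()
	fmt.Println("trimmed", trimmed, "entries")
}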
+func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd { + return c.xTrim(ctx, key, "maxlen", true, maxLen, 0) +} + +// XTrimMaxLen No `~` modifier is used; `limit` cannot be used. +// cmd: XTRIM key MAXLEN maxLen +func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd { + return c.xTrim(ctx, key, "maxlen", false, maxLen, 0) +} + +// XTrimMaxLenApprox: the LIMIT option is affected by a known redis-server bug; verify the behavior on your server before relying on it. +// issue: https://github.com/redis/redis/issues/9046 +// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit +func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd { + return c.xTrim(ctx, key, "maxlen", true, maxLen, limit) +} + +// XTrimMinID No `~` modifier is used; `limit` cannot be used. +// cmd: XTRIM key MINID minID +func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd { + return c.xTrim(ctx, key, "minid", false, minID, 0) +} + +// XTrimMinIDApprox: the LIMIT option is affected by a known redis-server bug; verify the behavior on your server before relying on it. +// issue: https://github.com/redis/redis/issues/9046 +// cmd: XTRIM key MINID ~ minID LIMIT limit +func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd { + return c.xTrim(ctx, key, "minid", true, minID, limit) +} + +func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd { + cmd := NewXInfoConsumersCmd(ctx, key, group) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd { + cmd := NewXInfoGroupsCmd(ctx, key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd { + cmd := NewXInfoStreamCmd(ctx, key) + _ = c(ctx, cmd) + return cmd +} + +// XInfoStreamFull XINFO STREAM FULL [COUNT count] +// redis-server >= 6.0. +func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd { + args := make([]interface{}, 0, 6) + args = append(args, "xinfo", "stream", key, "full") + if count > 0 { + args = append(args, "count", count) + } + cmd := NewXInfoStreamFullCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +// Z represents a sorted set member. +type Z struct { + Score float64 + Member interface{} +} + +// ZWithKey represents a sorted set member including the name of the key where it was popped. +type ZWithKey struct { + Z + Key string +} + +// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore. +type ZStore struct { + Keys []string + Weights []float64 + // Can be SUM, MIN or MAX. + Aggregate string +} + +func (z ZStore) len() (n int) { + n = len(z.Keys) + if len(z.Weights) > 0 { + n += 1 + len(z.Weights) + } + if z.Aggregate != "" { + n += 2 + } + return n +} + +func (z ZStore) appendArgs(args []interface{}) []interface{} { + for _, key := range z.Keys { + args = append(args, key) + } + if len(z.Weights) > 0 { + args = append(args, "weights") + for _, weights := range z.Weights { + args = append(args, weights) + } + } + if z.Aggregate != "" { + args = append(args, "aggregate", z.Aggregate) + } + return args +} + +// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "bzpopmax" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(ctx, timeout) + cmd := NewZWithKeyCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command. +func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "bzpopmin" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(ctx, timeout) + cmd := NewZWithKeyCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive. +type ZAddArgs struct { + NX bool + XX bool + LT bool + GT bool + Ch bool + Members []Z +} + +func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} { + a := make([]interface{}, 0, 6+2*len(args.Members)) + a = append(a, "zadd", key) + + // The GT, LT and NX options are mutually exclusive. + if args.NX { + a = append(a, "nx") + } else { + if args.XX { + a = append(a, "xx") + } + if args.GT { + a = append(a, "gt") + } else if args.LT { + a = append(a, "lt") + } + } + if args.Ch { + a = append(a, "ch") + } + if incr { + a = append(a, "incr") + } + for _, m := range args.Members { + a = append(a, m.Score) + a = append(a, m.Member) + } + return a +} + +func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd { + cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd { + cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...) + _ = c(ctx, cmd) + return cmd +} + +// TODO: Compatible with v8 api, will be removed in v9. +func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd { + args.Members = make([]Z, len(members)) + for i, m := range members { + args.Members[i] = *m + } + cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...) + _ = c(ctx, cmd) + return cmd +} + +// ZAdd Redis `ZADD key score member [score member ...]` command. +func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{}, members...) +} + +// ZAddNX Redis `ZADD key NX score member [score member ...]` command. +func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + NX: true, + }, members...) +} + +// ZAddXX Redis `ZADD key XX score member [score member ...]` command. +func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + XX: true, + }, members...) +} + +// ZAddCh Redis `ZADD key CH score member [score member ...]` command. +// Deprecated: Use +// client.ZAddArgs(ctx, ZAddArgs{ +// Ch: true, +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + Ch: true, + }, members...) +} + +// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command. +// Deprecated: Use +// client.ZAddArgs(ctx, ZAddArgs{ +// NX: true, +// Ch: true, +// Members: []Z, +// }) +// remove in v9. 
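Since every ZAdd*Ch/ZIncr* wrapper in this region is deprecated in favor of ZAddArgs and ZAddArgsIncr, a sketch of the replacement call (local Redis >= 6.2 for the GT option, go-redis v8, invented key and members):

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed local server

	// ZADD leaderboard GT CH: update a member only when the new score is
	// greater, and count changed (not just added) members in the reply.
	changed, err := rdb.ZAddArgs(ctx, "leaderboard", redis.ZAddArgs{
		GT: true,
		Ch: true,
		Members: []redis.Z{
			{Score: 42, Member: "alice"},
			{Score: 17, Member: "bob"},
		},
	}).Result()
	if err == nil {
		fmt.Println("changed:", changed)
	}
}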
+func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + NX: true, + Ch: true, + }, members...) +} + +// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command. +// Deprecated: Use +// client.ZAddArgs(ctx, ZAddArgs{ +// XX: true, +// Ch: true, +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + XX: true, + Ch: true, + }, members...) +} + +// ZIncr Redis `ZADD key INCR score member` command. +// Deprecated: Use +// client.ZAddArgsIncr(ctx, ZAddArgs{ +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd { + return c.ZAddArgsIncr(ctx, key, ZAddArgs{ + Members: []Z{*member}, + }) +} + +// ZIncrNX Redis `ZADD key NX INCR score member` command. +// Deprecated: Use +// client.ZAddArgsIncr(ctx, ZAddArgs{ +// NX: true, +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd { + return c.ZAddArgsIncr(ctx, key, ZAddArgs{ + NX: true, + Members: []Z{*member}, + }) +} + +// ZIncrXX Redis `ZADD key XX INCR score member` command. +// Deprecated: Use +// client.ZAddArgsIncr(ctx, ZAddArgs{ +// XX: true, +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd { + return c.ZAddArgsIncr(ctx, key, ZAddArgs{ + XX: true, + Members: []Z{*member}, + }) +} + +func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "zcard", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zcount", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zlexcount", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd { + cmd := NewFloatCmd(ctx, "zincrby", key, increment, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zinterstore", destination, len(store.Keys)) + args = store.appendArgs(args) + cmd := NewIntCmd(ctx, args...) + cmd.SetFirstKeyPos(3) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd { + args := make([]interface{}, 0, 2+store.len()) + args = append(args, "zinter", len(store.Keys)) + args = store.appendArgs(args) + cmd := NewStringSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zinter", len(store.Keys)) + args = store.appendArgs(args) + args = append(args, "withscores") + cmd := NewZSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd { + args := make([]interface{}, 2+len(members)) + args[0] = "zmscore" + args[1] = key + for i, member := range members { + args[2+i] = member + } + cmd := NewFloatSliceCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd { + args := []interface{}{ + "zpopmax", + key, + } + + switch len(count) { + case 0: + break + case 1: + args = append(args, count[0]) + default: + panic("too many arguments") + } + + cmd := NewZSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd { + args := []interface{}{ + "zpopmin", + key, + } + + switch len(count) { + case 0: + break + case 1: + args = append(args, count[0]) + default: + panic("too many arguments") + } + + cmd := NewZSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// ZRangeArgs holds all the options of the ZRange command. +// In Redis >= 6.2.0, it can replace the following commands: +// ZREVRANGE, +// ZRANGEBYSCORE, +// ZREVRANGEBYSCORE, +// ZRANGEBYLEX, +// ZREVRANGEBYLEX. +// Please pay attention to your redis-server version. +// +// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher. +type ZRangeArgs struct { + Key string + + // When the ByScore option is provided, open (exclusive) intervals can be set. + // By default, the score intervals specified by <Start> and <Stop> are closed (inclusive). + // It is similar to the deprecated (6.2.0+) ZRangeByScore command. + // For example: + // ZRangeArgs{ + // Key: "example-key", + // Start: "(3", + // Stop: 8, + // ByScore: true, + // } + // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8). + // + // For the ByLex option, it is similar to the deprecated (6.2.0+) ZRangeByLex command. + // You can set the <Start> and <Stop> options as follows: + // ZRangeArgs{ + // Key: "example-key", + // Start: "[abc", + // Stop: "(def", + // ByLex: true, + // } + // cmd: "ZRange example-key [abc (def ByLex" + // + // For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int). + // You can read the documentation for more information: https://redis.io/commands/zrange + Start interface{} + Stop interface{} + + // The ByScore and ByLex options are mutually exclusive. + ByScore bool + ByLex bool + + Rev bool + + // limit offset count. + Offset int64 + Count int64 +} + +func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} { + // For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>. + if z.Rev && (z.ByScore || z.ByLex) { + args = append(args, z.Key, z.Stop, z.Start) + } else { + args = append(args, z.Key, z.Start, z.Stop) + } + + if z.ByScore { + args = append(args, "byscore") + } else if z.ByLex { + args = append(args, "bylex") + } + if z.Rev { + args = append(args, "rev") + } + if z.Offset != 0 || z.Count != 0 { + args = append(args, "limit", z.Offset, z.Count) + } + return args +} + +func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd { + args := make([]interface{}, 0, 9) + args = append(args, "zrange") + args = z.appendArgs(args) + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd { + args := make([]interface{}, 0, 10) + args = append(args, "zrange") + args = z.appendArgs(args) + args = append(args, "withscores") + cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { + return c.ZRangeArgs(ctx, ZRangeArgs{ + Key: key, + Start: start, + Stop: stop, + }) +} + +func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { + return c.ZRangeArgsWithScores(ctx, ZRangeArgs{ + Key: key, + Start: start, + Stop: stop, + }) +} + +type ZRangeBy struct { + Min, Max string + Offset, Count int64 +} + +func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Min, opt.Max} + if withScores { + args = append(args, "withscores") + } + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRangeBy(ctx, "zrangebyscore", key, opt, false) +} + +func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRangeBy(ctx, "zrangebylex", key, opt, false) +} + +func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd { + args := make([]interface{}, 0, 10) + args = append(args, "zrangestore", dst) + args = z.appendArgs(args) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd { + cmd := NewIntCmd(ctx, "zrank", key, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "zrem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd { + cmd := NewIntCmd( + ctx, + "zremrangebyrank", + key, + start, + stop, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { + cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Max, opt.Min} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt) +} + +func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt) +} + +func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd { + cmd := NewIntCmd(ctx, "zrevrank", key, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd { + cmd := NewFloatCmd(ctx, "zscore", key, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd { + args := make([]interface{}, 0, 2+store.len()) + args = append(args, "zunion", len(store.Keys)) + args = store.appendArgs(args) + cmd := NewStringSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zunion", len(store.Keys)) + args = store.appendArgs(args) + args = append(args, "withscores") + cmd := NewZSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zunionstore", dest, len(store.Keys)) + args = store.appendArgs(args) + cmd := NewIntCmd(ctx, args...) + cmd.SetFirstKeyPos(3) + _ = c(ctx, cmd) + return cmd +} + +// ZRandMember redis-server version >= 6.2.0. 
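+// Illustrative usage sketch (assumes rdb is a *redis.Client): pick two random
+// members without their scores:
+//
+//	vals, err := rdb.ZRandMember(ctx, "zset-key", 2, false).Result()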
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd {
+	args := make([]interface{}, 0, 4)
+
+	// Although count=0 is meaningless, redis accepts count=0.
+	args = append(args, "zrandmember", key, count)
+	if withScores {
+		args = append(args, "withscores")
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiff redis-server version >= 6.2.0.
+func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "zdiff"
+	args[1] = len(keys)
+	for i, key := range keys {
+		args[i+2] = key
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiffWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "zdiff"
+	args[1] = len(keys)
+	for i, key := range keys {
+		args[i+2] = key
+	}
+	args[len(keys)+2] = "withscores"
+
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiffStore redis-server version >= 6.2.0.
+func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 0, 3+len(keys))
+	args = append(args, "zdiffstore", destination, len(keys))
+	for _, key := range keys {
+		args = append(args, key)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(els))
+	args[0] = "pfadd"
+	args[1] = key
+	args = appendArgs(args, els)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "pfcount"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "pfmerge"
+	args[1] = dest
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "bgrewriteaof")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "bgsave")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+	_ = c(ctx, cmd)
+	return cmd
+}
diff --git a/vendor/github.com/go-redis/redis/v8/options.go b/vendor/github.com/go-redis/redis/v8/options.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/options.go
+// ParseURL parses an URL into Options that can be used to connect to Redis.
+// Scheme is required.
+// There are two connection types: by tcp socket and by unix socket.
+// Tcp connection:
+//		redis://<user>:<password>@<host>:<port>/<db_number>
+// Unix connection:
+//		unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+// Most Option fields can be set using query parameters, with the following restrictions:
+//   - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+//   - only scalar type fields are supported (bool, int, time.Duration)
+//   - for time.Duration fields, values must be a valid input for time.ParseDuration();
+//     additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+//   - to disable a duration field, use value less than or equal to 0; to use the default
+//     value, leave the value blank or remove the parameter
+//   - only the last value is interpreted if a parameter is given multiple times
+//   - fields "network", "addr", "username" and "password" can only be set using other
+//     URL attributes (scheme, host, userinfo, respectively), query parameters using these
+//     names will be treated as unknown parameters
+//   - unknown parameter names will result in an error
+// Examples:
+//		redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
+//		is equivalent to:
+//		&Options{
+//			Network:     "tcp",
+//			Addr:        "localhost:6789",
+//			DB:          1,               // path "/3" was overridden by "&db=1"
+//			DialTimeout: 3 * time.Second, // no time unit = seconds
+//			ReadTimeout: 6 * time.Second,
+//			MaxRetries:  2,
+//		}
+func ParseURL(redisURL string) (*Options, error) {
+	u, err := url.Parse(redisURL)
+	if err != nil {
+		return nil, err
+	}
+
+	switch u.Scheme {
+	case "redis", "rediss":
+		return setupTCPConn(u)
+	case "unix":
+		return setupUnixConn(u)
+	default:
+		return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+	}
+}
+
+func setupTCPConn(u *url.URL) (*Options, error) {
+	o := &Options{Network: "tcp"}
+
+	o.Username, o.Password = getUserPassword(u)
+
+	h, p, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		h = u.Host
+	}
+	if h == "" {
+		h = "localhost"
+	}
+	if p == "" {
+		p = "6379"
+	}
+	o.Addr = net.JoinHostPort(h, p)
+
+	f := strings.FieldsFunc(u.Path, func(r rune) bool {
+		return r == '/'
+	})
+	switch len(f) {
+	case 0:
+		o.DB = 0
+	case 1:
+		if o.DB, err = strconv.Atoi(f[0]); err != nil {
+			return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
+		}
+	default:
+		return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
+	}
+
+	if u.Scheme == "rediss" {
+		o.TLSConfig = &tls.Config{ServerName: h}
+	}
+
+	return setupConnParams(u, o)
+}
+
+func setupUnixConn(u *url.URL) (*Options, error) {
+	o := &Options{
+		Network: "unix",
+	}
+
+	if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
+		return nil, errors.New("redis: empty unix socket path")
+	}
+	o.Addr = u.Path
+	o.Username, o.Password = getUserPassword(u)
+	return setupConnParams(u, o)
+}
+
+type queryOptions struct {
+	q   url.Values
+	err error
+}
+
+func (o *queryOptions) string(name string) string {
+	vs := o.q[name]
+	if len(vs) == 0 {
+		return ""
+	}
+	delete(o.q, name) // enable detection of unknown parameters
+	return vs[len(vs)-1]
+}
+
+func (o *queryOptions) int(name string) int {
+	s := o.string(name)
+	if s == "" {
+		return 0
+	}
+	i, err := strconv.Atoi(s)
+	if err == nil {
+		return i
+	}
+	if o.err == nil {
+		o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
+	}
+	return 0
+}
+
+func (o *queryOptions) duration(name string) time.Duration {
+	s := o.string(name)
+	if s == "" {
+		return 0
+	}
+	// try plain number first
+	if i, err := strconv.Atoi(s); err == nil {
+		if i <= 0 {
+			// disable timeouts
+			return -1
+		}
+		return time.Duration(i) * time.Second
+	}
+	dur, err := time.ParseDuration(s)
+	if err == nil {
+		return dur
+	}
+	if o.err == nil {
+		o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
+	}
+	return 0
+}
+
+func (o *queryOptions) bool(name string) bool {
+	switch s := o.string(name); s {
+	case "true", "1":
+		return true
+	case "false", "0", "":
+		return false
+	default:
+		if o.err == nil {
+			o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
+		}
+		return false
+	}
+}
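+// Illustrative usage sketch: a URL parsed by ParseURL with the rules above can
+// be fed directly to NewClient; a plain integer timeout is read as seconds:
+//
+//	opt, err := redis.ParseURL("redis://user:password@localhost:6789/3?dial_timeout=3&read_timeout=6s")
+//	if err != nil {
+//		panic(err)
+//	}
+//	rdb := redis.NewClient(opt)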
+func (o *queryOptions) remaining() []string {
+	if len(o.q) == 0 {
+		return nil
+	}
+	keys := make([]string, 0, len(o.q))
+	for k := range o.q {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+// setupConnParams converts query parameters in u to option values in o.
+func setupConnParams(u *url.URL, o *Options) (*Options, error) {
+	q := queryOptions{q: u.Query()}
+
+	// compat: a future major release may use q.int("db")
+	if tmp := q.string("db"); tmp != "" {
+		db, err := strconv.Atoi(tmp)
+		if err != nil {
+			return nil, fmt.Errorf("redis: invalid database number: %w", err)
+		}
+		o.DB = db
+	}
+
+	o.MaxRetries = q.int("max_retries")
+	o.MinRetryBackoff = q.duration("min_retry_backoff")
+	o.MaxRetryBackoff = q.duration("max_retry_backoff")
+	o.DialTimeout = q.duration("dial_timeout")
+	o.ReadTimeout = q.duration("read_timeout")
+	o.WriteTimeout = q.duration("write_timeout")
+	o.PoolFIFO = q.bool("pool_fifo")
+	o.PoolSize = q.int("pool_size")
+	o.MinIdleConns = q.int("min_idle_conns")
+	o.MaxConnAge = q.duration("max_conn_age")
+	o.PoolTimeout = q.duration("pool_timeout")
+	o.IdleTimeout = q.duration("idle_timeout")
+	o.IdleCheckFrequency = q.duration("idle_check_frequency")
+	if q.err != nil {
+		return nil, q.err
+	}
+
+	// any parameters left?
+	if r := q.remaining(); len(r) > 0 {
+		return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+	}
+
+	return o, nil
+}
+
+func getUserPassword(u *url.URL) (string, string) {
+	var user, password string
+	if u.User != nil {
+		user = u.User.Username()
+		if p, ok := u.User.Password(); ok {
+			password = p
+		}
+	}
+	return user, password
+}
+
+func newConnPool(opt *Options) *pool.ConnPool {
+	return pool.NewConnPool(&pool.Options{
+		Dialer: func(ctx context.Context) (net.Conn, error) {
+			return opt.Dialer(ctx, opt.Network, opt.Addr)
+		},
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+	})
+}
diff --git a/vendor/github.com/go-redis/redis/v8/package.json b/vendor/github.com/go-redis/redis/v8/package.json
new file mode 100644
index 00000000..e4ea4bb0
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/package.json
@@ -0,0 +1,8 @@
+{
+  "name": "redis",
+  "version": "8.11.5",
+  "main": "index.js",
+  "repository": "git@github.com:go-redis/redis.git",
+  "author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
+  "license": "BSD-2-clause"
+}
diff --git a/vendor/github.com/go-redis/redis/v8/pipeline.go b/vendor/github.com/go-redis/redis/v8/pipeline.go
new file mode 100644
index 00000000..31bab971
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/pipeline.go
@@ -0,0 +1,147 @@
+package redis
+
+import (
+	"context"
+	"sync"
+
+	"github.com/go-redis/redis/v8/internal/pool"
+)
+
+type pipelineExecer func(context.Context, []Cmder) error
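+// Illustrative usage sketch (assumes rdb is a *redis.Client): queue commands
+// in a closure and send them to the server in a single round trip:
+//
+//	var incr *redis.IntCmd
+//	_, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+//		incr = pipe.Incr(ctx, "pipelined_counter")
+//		pipe.Expire(ctx, "pipelined_counter", time.Hour)
+//		return nil
+//	})
+//	fmt.Println(incr.Val(), err) // values are populated after Pipelined returns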
+// Pipeliner is a mechanism to realise the Redis Pipeline technique.
+//
+// Pipelining is a technique to greatly speed up processing by packing
+// operations into batches, sending them at once to Redis and reading the
+// replies in a single step.
+// See https://redis.io/topics/pipelining
+//
+// Note that Pipeline is not a transaction, so you can get unexpected
+// results in case of big pipelines and small read/write timeouts.
+// The Redis client has retransmission logic in case of timeouts, so a
+// pipeline can be retransmitted and commands can be executed more than once.
+// To avoid this, it is a good idea to use reasonably large read/write
+// timeouts, depending on your batch size, and/or to use TxPipeline.
+type Pipeliner interface {
+	StatefulCmdable
+	Len() int
+	Do(ctx context.Context, args ...interface{}) *Cmd
+	Process(ctx context.Context, cmd Cmder) error
+	Close() error
+	Discard() error
+	Exec(ctx context.Context) ([]Cmder, error)
+}
+
+var _ Pipeliner = (*Pipeline)(nil)
+
+// Pipeline implements pipelining as described in
+// http://redis.io/topics/pipelining. It's safe for concurrent use
+// by multiple goroutines.
+type Pipeline struct {
+	cmdable
+	statefulCmdable
+
+	ctx  context.Context
+	exec pipelineExecer
+
+	mu     sync.Mutex
+	cmds   []Cmder
+	closed bool
+}
+
+func (c *Pipeline) init() {
+	c.cmdable = c.Process
+	c.statefulCmdable = c.Process
+}
+
+// Len returns the number of queued commands.
+func (c *Pipeline) Len() int {
+	c.mu.Lock()
+	ln := len(c.cmds)
+	c.mu.Unlock()
+	return ln
+}
+
+// Do queues the custom command for later execution.
+func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Process queues the cmd for later execution.
+func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
+	c.mu.Lock()
+	c.cmds = append(c.cmds, cmd)
+	c.mu.Unlock()
+	return nil
+}
+
+// Close closes the pipeline, releasing any open resources.
+func (c *Pipeline) Close() error {
+	c.mu.Lock()
+	_ = c.discard()
+	c.closed = true
+	c.mu.Unlock()
+	return nil
+}
+
+// Discard resets the pipeline and discards queued commands.
+func (c *Pipeline) Discard() error {
+	c.mu.Lock()
+	err := c.discard()
+	c.mu.Unlock()
+	return err
+}
+
+func (c *Pipeline) discard() error {
+	if c.closed {
+		return pool.ErrClosed
+	}
+	c.cmds = c.cmds[:0]
+	return nil
+}
+
+// Exec executes all previously queued commands using one
+// client-server roundtrip.
+//
+// Exec always returns the list of commands and the error of the first
+// failed command, if any.
+func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+
+	if len(c.cmds) == 0 {
+		return nil, nil
+	}
+
+	cmds := c.cmds
+	c.cmds = nil
+
+	return cmds, c.exec(ctx, cmds)
+}
+
+func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	if err := fn(c); err != nil {
+		return nil, err
+	}
+	cmds, err := c.Exec(ctx)
+	_ = c.Close()
+	return cmds, err
+}
+
+func (c *Pipeline) Pipeline() Pipeliner {
+	return c
+}
+
+func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipelined(ctx, fn)
+}
+
+func (c *Pipeline) TxPipeline() Pipeliner {
+	return c
+}
diff --git a/vendor/github.com/go-redis/redis/v8/pubsub.go b/vendor/github.com/go-redis/redis/v8/pubsub.go
new file mode 100644
index 00000000..efc2354a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/pubsub.go
@@ -0,0 +1,668 @@
+package redis
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
+)
+
+// PubSub implements Pub/Sub commands as described in
+// http://redis.io/topics/pubsub. Message receiving is NOT safe
+// for concurrent use by multiple goroutines.
+// +// PubSub automatically reconnects to Redis Server and resubscribes +// to the channels in case of network errors. +type PubSub struct { + opt *Options + + newConn func(ctx context.Context, channels []string) (*pool.Conn, error) + closeConn func(*pool.Conn) error + + mu sync.Mutex + cn *pool.Conn + channels map[string]struct{} + patterns map[string]struct{} + + closed bool + exit chan struct{} + + cmd *Cmd + + chOnce sync.Once + msgCh *channel + allCh *channel +} + +func (c *PubSub) init() { + c.exit = make(chan struct{}) +} + +func (c *PubSub) String() string { + channels := mapKeys(c.channels) + channels = append(channels, mapKeys(c.patterns)...) + return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", ")) +} + +func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) { + c.mu.Lock() + cn, err := c.conn(ctx, nil) + c.mu.Unlock() + return cn, err +} + +func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) { + if c.closed { + return nil, pool.ErrClosed + } + if c.cn != nil { + return c.cn, nil + } + + channels := mapKeys(c.channels) + channels = append(channels, newChannels...) + + cn, err := c.newConn(ctx, channels) + if err != nil { + return nil, err + } + + if err := c.resubscribe(ctx, cn); err != nil { + _ = c.closeConn(cn) + return nil, err + } + + c.cn = cn + return cn, nil +} + +func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error { + return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmd(wr, cmd) + }) +} + +func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error { + var firstErr error + + if len(c.channels) > 0 { + firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels)) + } + + if len(c.patterns) > 0 { + err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns)) + if err != nil && firstErr == nil { + firstErr = err + } + } + + return firstErr +} + +func mapKeys(m map[string]struct{}) []string { + s := make([]string, len(m)) + i := 0 + for k := range m { + s[i] = k + i++ + } + return s +} + +func (c *PubSub) _subscribe( + ctx context.Context, cn *pool.Conn, redisCmd string, channels []string, +) error { + args := make([]interface{}, 0, 1+len(channels)) + args = append(args, redisCmd) + for _, channel := range channels { + args = append(args, channel) + } + cmd := NewSliceCmd(ctx, args...) + return c.writeCmd(ctx, cn, cmd) +} + +func (c *PubSub) releaseConnWithLock( + ctx context.Context, + cn *pool.Conn, + err error, + allowTimeout bool, +) { + c.mu.Lock() + c.releaseConn(ctx, cn, err, allowTimeout) + c.mu.Unlock() +} + +func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) { + if c.cn != cn { + return + } + if isBadConn(err, allowTimeout, c.opt.Addr) { + c.reconnect(ctx, err) + } +} + +func (c *PubSub) reconnect(ctx context.Context, reason error) { + _ = c.closeTheCn(reason) + _, _ = c.conn(ctx, nil) +} + +func (c *PubSub) closeTheCn(reason error) error { + if c.cn == nil { + return nil + } + if !c.closed { + internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason) + } + err := c.closeConn(c.cn) + c.cn = nil + return err +} + +func (c *PubSub) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return pool.ErrClosed + } + c.closed = true + close(c.exit) + + return c.closeTheCn(pool.ErrClosed) +} + +// Subscribe the client to the specified channels. It returns +// empty subscription if there are no channels. 
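+// Illustrative usage sketch (assumes rdb is a *redis.Client): channels can be
+// added to an already-running subscription:
+//
+//	sub := rdb.Subscribe(ctx, "ch1")
+//	err := sub.Subscribe(ctx, "ch2", "ch3")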
+func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error { + c.mu.Lock() + defer c.mu.Unlock() + + err := c.subscribe(ctx, "subscribe", channels...) + if c.channels == nil { + c.channels = make(map[string]struct{}) + } + for _, s := range channels { + c.channels[s] = struct{}{} + } + return err +} + +// PSubscribe the client to the given patterns. It returns +// empty subscription if there are no patterns. +func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error { + c.mu.Lock() + defer c.mu.Unlock() + + err := c.subscribe(ctx, "psubscribe", patterns...) + if c.patterns == nil { + c.patterns = make(map[string]struct{}) + } + for _, s := range patterns { + c.patterns[s] = struct{}{} + } + return err +} + +// Unsubscribe the client from the given channels, or from all of +// them if none is given. +func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error { + c.mu.Lock() + defer c.mu.Unlock() + + for _, channel := range channels { + delete(c.channels, channel) + } + err := c.subscribe(ctx, "unsubscribe", channels...) + return err +} + +// PUnsubscribe the client from the given patterns, or from all of +// them if none is given. +func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error { + c.mu.Lock() + defer c.mu.Unlock() + + for _, pattern := range patterns { + delete(c.patterns, pattern) + } + err := c.subscribe(ctx, "punsubscribe", patterns...) + return err +} + +func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error { + cn, err := c.conn(ctx, channels) + if err != nil { + return err + } + + err = c._subscribe(ctx, cn, redisCmd, channels) + c.releaseConn(ctx, cn, err, false) + return err +} + +func (c *PubSub) Ping(ctx context.Context, payload ...string) error { + args := []interface{}{"ping"} + if len(payload) == 1 { + args = append(args, payload[0]) + } + cmd := NewCmd(ctx, args...) + + c.mu.Lock() + defer c.mu.Unlock() + + cn, err := c.conn(ctx, nil) + if err != nil { + return err + } + + err = c.writeCmd(ctx, cn, cmd) + c.releaseConn(ctx, cn, err, false) + return err +} + +// Subscription received after a successful subscription to channel. +type Subscription struct { + // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe". + Kind string + // Channel name we have subscribed to. + Channel string + // Number of channels we are currently subscribed to. + Count int +} + +func (m *Subscription) String() string { + return fmt.Sprintf("%s: %s", m.Kind, m.Channel) +} + +// Message received as result of a PUBLISH command issued by another client. +type Message struct { + Channel string + Pattern string + Payload string + PayloadSlice []string +} + +func (m *Message) String() string { + return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload) +} + +// Pong received as result of a PING command issued by another client. +type Pong struct { + Payload string +} + +func (p *Pong) String() string { + if p.Payload != "" { + return fmt.Sprintf("Pong<%s>", p.Payload) + } + return "Pong" +} + +func (c *PubSub) newMessage(reply interface{}) (interface{}, error) { + switch reply := reply.(type) { + case string: + return &Pong{ + Payload: reply, + }, nil + case []interface{}: + switch kind := reply[0].(string); kind { + case "subscribe", "unsubscribe", "psubscribe", "punsubscribe": + // Can be nil in case of "unsubscribe". 
+			channel, _ := reply[1].(string)
+			return &Subscription{
+				Kind:    kind,
+				Channel: channel,
+				Count:   int(reply[2].(int64)),
+			}, nil
+		case "message":
+			switch payload := reply[2].(type) {
+			case string:
+				return &Message{
+					Channel: reply[1].(string),
+					Payload: payload,
+				}, nil
+			case []interface{}:
+				ss := make([]string, len(payload))
+				for i, s := range payload {
+					ss[i] = s.(string)
+				}
+				return &Message{
+					Channel:      reply[1].(string),
+					PayloadSlice: ss,
+				}, nil
+			default:
+				return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
+			}
+		case "pmessage":
+			return &Message{
+				Pattern: reply[1].(string),
+				Channel: reply[2].(string),
+				Payload: reply[3].(string),
+			}, nil
+		case "pong":
+			return &Pong{
+				Payload: reply[1].(string),
+			}, nil
+		default:
+			return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
+		}
+	default:
+		return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
+	}
+}
+
+// ReceiveTimeout acts like Receive but returns an error if message
+// is not received in time. This is low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
+	if c.cmd == nil {
+		c.cmd = NewCmd(ctx)
+	}
+
+	// Don't hold the lock to allow subscriptions and pings.
+
+	cn, err := c.connWithLock(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
+		return c.cmd.readReply(rd)
+	})
+
+	c.releaseConnWithLock(ctx, cn, err, timeout > 0)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return c.newMessage(c.cmd.Val())
+}
+
+// Receive returns a message as a Subscription, Message, Pong or error.
+// See PubSub example for details. This is low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
+	return c.ReceiveTimeout(ctx, 0)
+}
+
+// ReceiveMessage returns a Message or error ignoring Subscription and Pong
+// messages. This is low-level API and in most cases Channel should be used
+// instead.
+func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
+	for {
+		msg, err := c.Receive(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		switch msg := msg.(type) {
+		case *Subscription:
+			// Ignore.
+		case *Pong:
+			// Ignore.
+		case *Message:
+			return msg, nil
+		default:
+			err := fmt.Errorf("redis: unknown message: %T", msg)
+			return nil, err
+		}
+	}
+}
+
+func (c *PubSub) getContext() context.Context {
+	if c.cmd != nil {
+		return c.cmd.ctx
+	}
+	return context.Background()
+}
+
+//------------------------------------------------------------------------------
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// stays full for 30 seconds, the message is dropped.
+// Receive* APIs cannot be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health
+// and re-subscribes if a ping cannot be received for 30 seconds.
+func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
+	c.chOnce.Do(func() {
+		c.msgCh = newChannel(c, opts...)
+		c.msgCh.initMsgChan()
+	})
+	if c.msgCh == nil {
+		err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+		panic(err)
+	}
+	return c.msgCh.msgCh
+}
+
+// ChannelSize is like Channel, but creates a Go channel
+// with specified buffer size.
+// +// Deprecated: use Channel(WithChannelSize(size)), remove in v9. +func (c *PubSub) ChannelSize(size int) <-chan *Message { + return c.Channel(WithChannelSize(size)) +} + +// ChannelWithSubscriptions is like Channel, but message type can be either +// *Subscription or *Message. Subscription messages can be used to detect +// reconnections. +// +// ChannelWithSubscriptions can not be used together with Channel or ChannelSize. +func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} { + c.chOnce.Do(func() { + c.allCh = newChannel(c, WithChannelSize(size)) + c.allCh.initAllChan() + }) + if c.allCh == nil { + err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel") + panic(err) + } + return c.allCh.allCh +} + +type ChannelOption func(c *channel) + +// WithChannelSize specifies the Go chan size that is used to buffer incoming messages. +// +// The default is 100 messages. +func WithChannelSize(size int) ChannelOption { + return func(c *channel) { + c.chanSize = size + } +} + +// WithChannelHealthCheckInterval specifies the health check interval. +// PubSub will ping Redis Server if it does not receive any messages within the interval. +// To disable health check, use zero interval. +// +// The default is 3 seconds. +func WithChannelHealthCheckInterval(d time.Duration) ChannelOption { + return func(c *channel) { + c.checkInterval = d + } +} + +// WithChannelSendTimeout specifies the channel send timeout after which +// the message is dropped. +// +// The default is 60 seconds. +func WithChannelSendTimeout(d time.Duration) ChannelOption { + return func(c *channel) { + c.chanSendTimeout = d + } +} + +type channel struct { + pubSub *PubSub + + msgCh chan *Message + allCh chan interface{} + ping chan struct{} + + chanSize int + chanSendTimeout time.Duration + checkInterval time.Duration +} + +func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel { + c := &channel{ + pubSub: pubSub, + + chanSize: 100, + chanSendTimeout: time.Minute, + checkInterval: 3 * time.Second, + } + for _, opt := range opts { + opt(c) + } + if c.checkInterval > 0 { + c.initHealthCheck() + } + return c +} + +func (c *channel) initHealthCheck() { + ctx := context.TODO() + c.ping = make(chan struct{}, 1) + + go func() { + timer := time.NewTimer(time.Minute) + timer.Stop() + + for { + timer.Reset(c.checkInterval) + select { + case <-c.ping: + if !timer.Stop() { + <-timer.C + } + case <-timer.C: + if pingErr := c.pubSub.Ping(ctx); pingErr != nil { + c.pubSub.mu.Lock() + c.pubSub.reconnect(ctx, pingErr) + c.pubSub.mu.Unlock() + } + case <-c.pubSub.exit: + return + } + } + }() +} + +// initMsgChan must be in sync with initAllChan. +func (c *channel) initMsgChan() { + ctx := context.TODO() + c.msgCh = make(chan *Message, c.chanSize) + + go func() { + timer := time.NewTimer(time.Minute) + timer.Stop() + + var errCount int + for { + msg, err := c.pubSub.Receive(ctx) + if err != nil { + if err == pool.ErrClosed { + close(c.msgCh) + return + } + if errCount > 0 { + time.Sleep(100 * time.Millisecond) + } + errCount++ + continue + } + + errCount = 0 + + // Any message is as good as a ping. + select { + case c.ping <- struct{}{}: + default: + } + + switch msg := msg.(type) { + case *Subscription: + // Ignore. + case *Pong: + // Ignore. 
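+				// Everything this loop forwards is consumed via Channel; an
+				// illustrative usage sketch (assumes rdb is a *redis.Client):
+				//
+				//	sub := rdb.Subscribe(ctx, "mychannel")
+				//	defer sub.Close()
+				//	for msg := range sub.Channel() {
+				//		fmt.Println(msg.Channel, msg.Payload)
+				//	}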
+ case *Message: + timer.Reset(c.chanSendTimeout) + select { + case c.msgCh <- msg: + if !timer.Stop() { + <-timer.C + } + case <-timer.C: + internal.Logger.Printf( + ctx, "redis: %s channel is full for %s (message is dropped)", + c, c.chanSendTimeout) + } + default: + internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg) + } + } + }() +} + +// initAllChan must be in sync with initMsgChan. +func (c *channel) initAllChan() { + ctx := context.TODO() + c.allCh = make(chan interface{}, c.chanSize) + + go func() { + timer := time.NewTimer(time.Minute) + timer.Stop() + + var errCount int + for { + msg, err := c.pubSub.Receive(ctx) + if err != nil { + if err == pool.ErrClosed { + close(c.allCh) + return + } + if errCount > 0 { + time.Sleep(100 * time.Millisecond) + } + errCount++ + continue + } + + errCount = 0 + + // Any message is as good as a ping. + select { + case c.ping <- struct{}{}: + default: + } + + switch msg := msg.(type) { + case *Pong: + // Ignore. + case *Subscription, *Message: + timer.Reset(c.chanSendTimeout) + select { + case c.allCh <- msg: + if !timer.Stop() { + <-timer.C + } + case <-timer.C: + internal.Logger.Printf( + ctx, "redis: %s channel is full for %s (message is dropped)", + c, c.chanSendTimeout) + } + default: + internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg) + } + } + }() +} diff --git a/vendor/github.com/go-redis/redis/v8/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go new file mode 100644 index 00000000..bcf8a2a9 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/redis.go @@ -0,0 +1,773 @@ +package redis + +import ( + "context" + "errors" + "fmt" + "sync/atomic" + "time" + + "github.com/go-redis/redis/v8/internal" + "github.com/go-redis/redis/v8/internal/pool" + "github.com/go-redis/redis/v8/internal/proto" +) + +// Nil reply returned by Redis when key does not exist. 
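+// Compare errors against Nil to tell a missing key apart from a real failure;
+// an illustrative sketch (assumes rdb is a *redis.Client):
+//
+//	val, err := rdb.Get(ctx, "missing").Result()
+//	if err == redis.Nil {
+//		// key does not exist; val is ""
+//	}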
+const Nil = proto.Nil + +func SetLogger(logger internal.Logging) { + internal.Logger = logger +} + +//------------------------------------------------------------------------------ + +type Hook interface { + BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error) + AfterProcess(ctx context.Context, cmd Cmder) error + + BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error) + AfterProcessPipeline(ctx context.Context, cmds []Cmder) error +} + +type hooks struct { + hooks []Hook +} + +func (hs *hooks) lock() { + hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)] +} + +func (hs hooks) clone() hooks { + clone := hs + clone.lock() + return clone +} + +func (hs *hooks) AddHook(hook Hook) { + hs.hooks = append(hs.hooks, hook) +} + +func (hs hooks) process( + ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error, +) error { + if len(hs.hooks) == 0 { + err := fn(ctx, cmd) + cmd.SetErr(err) + return err + } + + var hookIndex int + var retErr error + + for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ { + ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd) + if retErr != nil { + cmd.SetErr(retErr) + } + } + + if retErr == nil { + retErr = fn(ctx, cmd) + cmd.SetErr(retErr) + } + + for hookIndex--; hookIndex >= 0; hookIndex-- { + if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil { + retErr = err + cmd.SetErr(retErr) + } + } + + return retErr +} + +func (hs hooks) processPipeline( + ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error, +) error { + if len(hs.hooks) == 0 { + err := fn(ctx, cmds) + return err + } + + var hookIndex int + var retErr error + + for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ { + ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds) + if retErr != nil { + setCmdsErr(cmds, retErr) + } + } + + if retErr == nil { + retErr = fn(ctx, cmds) + } + + for hookIndex--; hookIndex >= 0; hookIndex-- { + if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil { + retErr = err + setCmdsErr(cmds, retErr) + } + } + + return retErr +} + +func (hs hooks) processTxPipeline( + ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error, +) error { + cmds = wrapMultiExec(ctx, cmds) + return hs.processPipeline(ctx, cmds, fn) +} + +//------------------------------------------------------------------------------ + +type baseClient struct { + opt *Options + connPool pool.Pooler + + onClose func() error // hook called when client is closed +} + +func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient { + return &baseClient{ + opt: opt, + connPool: connPool, + } +} + +func (c *baseClient) clone() *baseClient { + clone := *c + return &clone +} + +func (c *baseClient) withTimeout(timeout time.Duration) *baseClient { + opt := c.opt.clone() + opt.ReadTimeout = timeout + opt.WriteTimeout = timeout + + clone := c.clone() + clone.opt = opt + + return clone +} + +func (c *baseClient) String() string { + return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB) +} + +func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) { + cn, err := c.connPool.NewConn(ctx) + if err != nil { + return nil, err + } + + err = c.initConn(ctx, cn) + if err != nil { + _ = c.connPool.CloseConn(cn) + return nil, err + } + + return cn, nil +} + +func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) { + if c.opt.Limiter != nil { + err := c.opt.Limiter.Allow() + if err != nil { + return nil, err + } + } + + cn, err := 
c._getConn(ctx) + if err != nil { + if c.opt.Limiter != nil { + c.opt.Limiter.ReportResult(err) + } + return nil, err + } + + return cn, nil +} + +func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) { + cn, err := c.connPool.Get(ctx) + if err != nil { + return nil, err + } + + if cn.Inited { + return cn, nil + } + + if err := c.initConn(ctx, cn); err != nil { + c.connPool.Remove(ctx, cn, err) + if err := errors.Unwrap(err); err != nil { + return nil, err + } + return nil, err + } + + return cn, nil +} + +func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { + if cn.Inited { + return nil + } + cn.Inited = true + + if c.opt.Password == "" && + c.opt.DB == 0 && + !c.opt.readOnly && + c.opt.OnConnect == nil { + return nil + } + + connPool := pool.NewSingleConnPool(c.connPool, cn) + conn := newConn(ctx, c.opt, connPool) + + _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error { + if c.opt.Password != "" { + if c.opt.Username != "" { + pipe.AuthACL(ctx, c.opt.Username, c.opt.Password) + } else { + pipe.Auth(ctx, c.opt.Password) + } + } + + if c.opt.DB > 0 { + pipe.Select(ctx, c.opt.DB) + } + + if c.opt.readOnly { + pipe.ReadOnly(ctx) + } + + return nil + }) + if err != nil { + return err + } + + if c.opt.OnConnect != nil { + return c.opt.OnConnect(ctx, conn) + } + return nil +} + +func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) { + if c.opt.Limiter != nil { + c.opt.Limiter.ReportResult(err) + } + + if isBadConn(err, false, c.opt.Addr) { + c.connPool.Remove(ctx, cn, err) + } else { + c.connPool.Put(ctx, cn) + } +} + +func (c *baseClient) withConn( + ctx context.Context, fn func(context.Context, *pool.Conn) error, +) error { + cn, err := c.getConn(ctx) + if err != nil { + return err + } + + defer func() { + c.releaseConn(ctx, cn, err) + }() + + done := ctx.Done() //nolint:ifshort + + if done == nil { + err = fn(ctx, cn) + return err + } + + errc := make(chan error, 1) + go func() { errc <- fn(ctx, cn) }() + + select { + case <-done: + _ = cn.Close() + // Wait for the goroutine to finish and send something. 
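+	// Otherwise fn could still be running and write into a connection that
+	// has already been released; closing cn above unblocks fn if it is
+	// still stuck on network I/O.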
+ <-errc + + err = ctx.Err() + return err + case err = <-errc: + return err + } +} + +func (c *baseClient) process(ctx context.Context, cmd Cmder) error { + var lastErr error + for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { + attempt := attempt + + retry, err := c._process(ctx, cmd, attempt) + if err == nil || !retry { + return err + } + + lastErr = err + } + return lastErr +} + +func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return false, err + } + } + + retryTimeout := uint32(1) + err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmd(wr, cmd) + }) + if err != nil { + return err + } + + err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply) + if err != nil { + if cmd.readTimeout() == nil { + atomic.StoreUint32(&retryTimeout, 1) + } + return err + } + + return nil + }) + if err == nil { + return false, nil + } + + retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1) + return retry, err +} + +func (c *baseClient) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration { + if timeout := cmd.readTimeout(); timeout != nil { + t := *timeout + if t == 0 { + return 0 + } + return t + 10*time.Second + } + return c.opt.ReadTimeout +} + +// Close closes the client, releasing any open resources. +// +// It is rare to Close a Client, as the Client is meant to be +// long-lived and shared between many goroutines. +func (c *baseClient) Close() error { + var firstErr error + if c.onClose != nil { + if err := c.onClose(); err != nil { + firstErr = err + } + } + if err := c.connPool.Close(); err != nil && firstErr == nil { + firstErr = err + } + return firstErr +} + +func (c *baseClient) getAddr() string { + return c.opt.Addr +} + +func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error { + return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds) +} + +func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { + return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds) +} + +type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error) + +func (c *baseClient) generalProcessPipeline( + ctx context.Context, cmds []Cmder, p pipelineProcessor, +) error { + err := c._generalProcessPipeline(ctx, cmds, p) + if err != nil { + setCmdsErr(cmds, err) + return err + } + return cmdsFirstErr(cmds) +} + +func (c *baseClient) _generalProcessPipeline( + ctx context.Context, cmds []Cmder, p pipelineProcessor, +) error { + var lastErr error + for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + var canRetry bool + lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + var err error + canRetry, err = p(ctx, cn, cmds) + return err + }) + if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) { + return lastErr + } + } + return lastErr +} + +func (c *baseClient) pipelineProcessCmds( + ctx context.Context, cn *pool.Conn, cmds []Cmder, +) (bool, error) { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }) + if err != nil { + return 
true, err + } + + err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { + return pipelineReadCmds(rd, cmds) + }) + return true, err +} + +func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error { + for _, cmd := range cmds { + err := cmd.readReply(rd) + cmd.SetErr(err) + if err != nil && !isRedisError(err) { + return err + } + } + return nil +} + +func (c *baseClient) txPipelineProcessCmds( + ctx context.Context, cn *pool.Conn, cmds []Cmder, +) (bool, error) { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }) + if err != nil { + return true, err + } + + err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { + statusCmd := cmds[0].(*StatusCmd) + // Trim multi and exec. + cmds = cmds[1 : len(cmds)-1] + + err := txPipelineReadQueued(rd, statusCmd, cmds) + if err != nil { + return err + } + + return pipelineReadCmds(rd, cmds) + }) + return false, err +} + +func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder { + if len(cmds) == 0 { + panic("not reached") + } + cmdCopy := make([]Cmder, len(cmds)+2) + cmdCopy[0] = NewStatusCmd(ctx, "multi") + copy(cmdCopy[1:], cmds) + cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec") + return cmdCopy +} + +func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error { + // Parse queued replies. + if err := statusCmd.readReply(rd); err != nil { + return err + } + + for range cmds { + if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) { + return err + } + } + + // Parse number of replies. + line, err := rd.ReadLine() + if err != nil { + if err == Nil { + err = TxFailedErr + } + return err + } + + switch line[0] { + case proto.ErrorReply: + return proto.ParseErrorReply(line) + case proto.ArrayReply: + // ok + default: + err := fmt.Errorf("redis: expected '*', but got line %q", line) + return err + } + + return nil +} + +//------------------------------------------------------------------------------ + +// Client is a Redis client representing a pool of zero or more +// underlying connections. It's safe for concurrent use by multiple +// goroutines. +type Client struct { + *baseClient + cmdable + hooks + ctx context.Context +} + +// NewClient returns a client to the Redis Server specified by Options. +func NewClient(opt *Options) *Client { + opt.init() + + c := Client{ + baseClient: newBaseClient(opt, newConnPool(opt)), + ctx: context.Background(), + } + c.cmdable = c.Process + + return &c +} + +func (c *Client) clone() *Client { + clone := *c + clone.cmdable = clone.Process + clone.hooks.lock() + return &clone +} + +func (c *Client) WithTimeout(timeout time.Duration) *Client { + clone := c.clone() + clone.baseClient = c.baseClient.withTimeout(timeout) + return clone +} + +func (c *Client) Context() context.Context { + return c.ctx +} + +func (c *Client) WithContext(ctx context.Context) *Client { + if ctx == nil { + panic("nil context") + } + clone := c.clone() + clone.ctx = ctx + return clone +} + +func (c *Client) Conn(ctx context.Context) *Conn { + return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool)) +} + +// Do creates a Cmd from the args and processes the cmd. +func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd { + cmd := NewCmd(ctx, args...) 
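+	// Illustrative usage sketch (assumes rdb is a *redis.Client): Do suits
+	// commands that have no typed helper; the raw reply is an interface{}:
+	//
+	//	v, err := rdb.Do(ctx, "object", "encoding", "mykey").Result()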
+ _ = c.Process(ctx, cmd) + return cmd +} + +func (c *Client) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.baseClient.process) +} + +func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline) +} + +func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline) +} + +// Options returns read-only Options that were used to create the client. +func (c *Client) Options() *Options { + return c.opt +} + +type PoolStats pool.Stats + +// PoolStats returns connection pool stats. +func (c *Client) PoolStats() *PoolStats { + stats := c.connPool.Stats() + return (*PoolStats)(stats) +} + +func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(ctx, fn) +} + +func (c *Client) Pipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processPipeline, + } + pipe.init() + return &pipe +} + +func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(ctx, fn) +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *Client) TxPipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processTxPipeline, + } + pipe.init() + return &pipe +} + +func (c *Client) pubSub() *PubSub { + pubsub := &PubSub{ + opt: c.opt, + + newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { + return c.newConn(ctx) + }, + closeConn: c.connPool.CloseConn, + } + pubsub.init() + return pubsub +} + +// Subscribe subscribes the client to the specified channels. +// Channels can be omitted to create empty subscription. +// Note that this method does not wait on a response from Redis, so the +// subscription may not be active immediately. To force the connection to wait, +// you may call the Receive() method on the returned *PubSub like so: +// +// sub := client.Subscribe(queryResp) +// iface, err := sub.Receive() +// if err != nil { +// // handle error +// } +// +// // Should be *Subscription, but others are possible if other actions have been +// // taken on sub since it was created. +// switch iface.(type) { +// case *Subscription: +// // subscribe succeeded +// case *Message: +// // received first message +// case *Pong: +// // pong received +// default: +// // handle error +// } +// +// ch := sub.Channel() +func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.Subscribe(ctx, channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.PSubscribe(ctx, channels...) + } + return pubsub +} + +//------------------------------------------------------------------------------ + +type conn struct { + baseClient + cmdable + statefulCmdable + hooks // TODO: inherit hooks +} + +// Conn represents a single Redis connection rather than a pool of connections. +// Prefer running commands from Client unless there is a specific need +// for a continuous single Redis connection. 
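+//
+// Illustrative usage sketch (assumes rdb is a *redis.Client): a Conn pins a
+// single connection, which stateful commands such as CLIENT SETNAME need:
+//
+//	conn := rdb.Conn(ctx)
+//	defer conn.Close()
+//	err := conn.ClientSetName(ctx, "worker-1").Err()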
+type Conn struct { + *conn + ctx context.Context +} + +func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn { + c := Conn{ + conn: &conn{ + baseClient: baseClient{ + opt: opt, + connPool: connPool, + }, + }, + ctx: ctx, + } + c.cmdable = c.Process + c.statefulCmdable = c.Process + return &c +} + +func (c *Conn) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.baseClient.process) +} + +func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline) +} + +func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline) +} + +func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(ctx, fn) +} + +func (c *Conn) Pipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processPipeline, + } + pipe.init() + return &pipe +} + +func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(ctx, fn) +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *Conn) TxPipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processTxPipeline, + } + pipe.init() + return &pipe +} diff --git a/vendor/github.com/go-redis/redis/v8/result.go b/vendor/github.com/go-redis/redis/v8/result.go new file mode 100644 index 00000000..24cfd499 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/result.go @@ -0,0 +1,180 @@ +package redis + +import "time" + +// NewCmdResult returns a Cmd initialised with val and err for testing. +func NewCmdResult(val interface{}, err error) *Cmd { + var cmd Cmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewSliceResult returns a SliceCmd initialised with val and err for testing. +func NewSliceResult(val []interface{}, err error) *SliceCmd { + var cmd SliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewStatusResult returns a StatusCmd initialised with val and err for testing. +func NewStatusResult(val string, err error) *StatusCmd { + var cmd StatusCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewIntResult returns an IntCmd initialised with val and err for testing. +func NewIntResult(val int64, err error) *IntCmd { + var cmd IntCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewDurationResult returns a DurationCmd initialised with val and err for testing. +func NewDurationResult(val time.Duration, err error) *DurationCmd { + var cmd DurationCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewBoolResult returns a BoolCmd initialised with val and err for testing. +func NewBoolResult(val bool, err error) *BoolCmd { + var cmd BoolCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewStringResult returns a StringCmd initialised with val and err for testing. +func NewStringResult(val string, err error) *StringCmd { + var cmd StringCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewFloatResult returns a FloatCmd initialised with val and err for testing. +func NewFloatResult(val float64, err error) *FloatCmd { + var cmd FloatCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing. 
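+//
+// Illustrative usage sketch: these Result constructors make it easy to stub
+// replies in tests without a live server:
+//
+//	cmd := redis.NewStringSliceResult([]string{"a", "b"}, nil)
+//	vals, err := cmd.Result() // []string{"a", "b"}, nil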
+func NewStringSliceResult(val []string, err error) *StringSliceCmd { + var cmd StringSliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing. +func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd { + var cmd BoolSliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing. +func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd { + var cmd StringStringMapCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing. +func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd { + var cmd StringIntMapCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing. +func NewTimeCmdResult(val time.Time, err error) *TimeCmd { + var cmd TimeCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing. +func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd { + var cmd ZSliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing. +func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd { + var cmd ZWithKeyCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewScanCmdResult returns a ScanCmd initialised with val and err for testing. +func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd { + var cmd ScanCmd + cmd.page = keys + cmd.cursor = cursor + cmd.SetErr(err) + return &cmd +} + +// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing. +func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd { + var cmd ClusterSlotsCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing. +func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd { + var cmd GeoLocationCmd + cmd.locations = val + cmd.SetErr(err) + return &cmd +} + +// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing. +func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd { + var cmd GeoPosCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing. +func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd { + var cmd CommandsInfoCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing. +func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd { + var cmd XMessageSliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing. 
+func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd { + var cmd XStreamSliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} diff --git a/vendor/github.com/go-redis/redis/v8/ring.go b/vendor/github.com/go-redis/redis/v8/ring.go new file mode 100644 index 00000000..4df00fc8 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/ring.go @@ -0,0 +1,736 @@ +package redis + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/cespare/xxhash/v2" + rendezvous "github.com/dgryski/go-rendezvous" //nolint + + "github.com/go-redis/redis/v8/internal" + "github.com/go-redis/redis/v8/internal/hashtag" + "github.com/go-redis/redis/v8/internal/pool" + "github.com/go-redis/redis/v8/internal/rand" +) + +var errRingShardsDown = errors.New("redis: all ring shards are down") + +//------------------------------------------------------------------------------ + +type ConsistentHash interface { + Get(string) string +} + +type rendezvousWrapper struct { + *rendezvous.Rendezvous +} + +func (w rendezvousWrapper) Get(key string) string { + return w.Lookup(key) +} + +func newRendezvous(shards []string) ConsistentHash { + return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)} +} + +//------------------------------------------------------------------------------ + +// RingOptions are used to configure a ring client and should be +// passed to NewRing. +type RingOptions struct { + // Map of name => host:port addresses of ring shards. + Addrs map[string]string + + // NewClient creates a shard client with provided name and options. + NewClient func(name string, opt *Options) *Client + + // Frequency of PING commands sent to check shards availability. + // Shard is considered down after 3 subsequent failed checks. + HeartbeatFrequency time.Duration + + // NewConsistentHash returns a consistent hash that is used + // to distribute keys across the shards. + // + // See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8 + // for consistent hashing algorithmic tradeoffs. + NewConsistentHash func(shards []string) ConsistentHash + + // Following options are copied from Options struct. + + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) + OnConnect func(ctx context.Context, cn *Conn) error + + Username string + Password string + DB int + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). 
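+	// FIFO has slightly higher overhead compared to LIFO, but it helps to
+	// close idle connections faster, reducing the pool size.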
+ PoolFIFO bool + + PoolSize int + MinIdleConns int + MaxConnAge time.Duration + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + + TLSConfig *tls.Config + Limiter Limiter +} + +func (opt *RingOptions) init() { + if opt.NewClient == nil { + opt.NewClient = func(name string, opt *Options) *Client { + return NewClient(opt) + } + } + + if opt.HeartbeatFrequency == 0 { + opt.HeartbeatFrequency = 500 * time.Millisecond + } + + if opt.NewConsistentHash == nil { + opt.NewConsistentHash = newRendezvous + } + + if opt.MaxRetries == -1 { + opt.MaxRetries = 0 + } else if opt.MaxRetries == 0 { + opt.MaxRetries = 3 + } + switch opt.MinRetryBackoff { + case -1: + opt.MinRetryBackoff = 0 + case 0: + opt.MinRetryBackoff = 8 * time.Millisecond + } + switch opt.MaxRetryBackoff { + case -1: + opt.MaxRetryBackoff = 0 + case 0: + opt.MaxRetryBackoff = 512 * time.Millisecond + } +} + +func (opt *RingOptions) clientOptions() *Options { + return &Options{ + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + + Username: opt.Username, + Password: opt.Password, + DB: opt.DB, + + MaxRetries: -1, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: opt.IdleCheckFrequency, + + TLSConfig: opt.TLSConfig, + Limiter: opt.Limiter, + } +} + +//------------------------------------------------------------------------------ + +type ringShard struct { + Client *Client + down int32 +} + +func newRingShard(opt *RingOptions, name, addr string) *ringShard { + clopt := opt.clientOptions() + clopt.Addr = addr + + return &ringShard{ + Client: opt.NewClient(name, clopt), + } +} + +func (shard *ringShard) String() string { + var state string + if shard.IsUp() { + state = "up" + } else { + state = "down" + } + return fmt.Sprintf("%s is %s", shard.Client, state) +} + +func (shard *ringShard) IsDown() bool { + const threshold = 3 + return atomic.LoadInt32(&shard.down) >= threshold +} + +func (shard *ringShard) IsUp() bool { + return !shard.IsDown() +} + +// Vote votes to set shard state and returns true if state was changed. 
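With RingOptions defined above and init() filling in every zero field, creating a ring takes nothing more than a shard map. A minimal sketch; the shard names and addresses are placeholders:

package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// Shard names are stable identifiers used by the consistent hash;
	// only the addresses matter to Redis itself.
	rdb := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": "localhost:7000", // placeholder addresses
			"shard2": "localhost:7001",
		},
	})
	defer rdb.Close()

	// Keys are routed to a shard via the consistent hash.
	_ = rdb.Set(ctx, "key", "value", 0).Err()
}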
+func (shard *ringShard) Vote(up bool) bool { + if up { + changed := shard.IsDown() + atomic.StoreInt32(&shard.down, 0) + return changed + } + + if shard.IsDown() { + return false + } + + atomic.AddInt32(&shard.down, 1) + return shard.IsDown() +} + +//------------------------------------------------------------------------------ + +type ringShards struct { + opt *RingOptions + + mu sync.RWMutex + hash ConsistentHash + shards map[string]*ringShard // read only + list []*ringShard // read only + numShard int + closed bool +} + +func newRingShards(opt *RingOptions) *ringShards { + shards := make(map[string]*ringShard, len(opt.Addrs)) + list := make([]*ringShard, 0, len(shards)) + + for name, addr := range opt.Addrs { + shard := newRingShard(opt, name, addr) + shards[name] = shard + + list = append(list, shard) + } + + c := &ringShards{ + opt: opt, + + shards: shards, + list: list, + } + c.rebalance() + + return c +} + +func (c *ringShards) List() []*ringShard { + var list []*ringShard + + c.mu.RLock() + if !c.closed { + list = c.list + } + c.mu.RUnlock() + + return list +} + +func (c *ringShards) Hash(key string) string { + key = hashtag.Key(key) + + var hash string + + c.mu.RLock() + if c.numShard > 0 { + hash = c.hash.Get(key) + } + c.mu.RUnlock() + + return hash +} + +func (c *ringShards) GetByKey(key string) (*ringShard, error) { + key = hashtag.Key(key) + + c.mu.RLock() + + if c.closed { + c.mu.RUnlock() + return nil, pool.ErrClosed + } + + if c.numShard == 0 { + c.mu.RUnlock() + return nil, errRingShardsDown + } + + hash := c.hash.Get(key) + if hash == "" { + c.mu.RUnlock() + return nil, errRingShardsDown + } + + shard := c.shards[hash] + c.mu.RUnlock() + + return shard, nil +} + +func (c *ringShards) GetByName(shardName string) (*ringShard, error) { + if shardName == "" { + return c.Random() + } + + c.mu.RLock() + shard := c.shards[shardName] + c.mu.RUnlock() + return shard, nil +} + +func (c *ringShards) Random() (*ringShard, error) { + return c.GetByKey(strconv.Itoa(rand.Int())) +} + +// heartbeat monitors state of each shard in the ring. +func (c *ringShards) Heartbeat(frequency time.Duration) { + ticker := time.NewTicker(frequency) + defer ticker.Stop() + + ctx := context.Background() + for range ticker.C { + var rebalance bool + + for _, shard := range c.List() { + err := shard.Client.Ping(ctx).Err() + isUp := err == nil || err == pool.ErrPoolTimeout + if shard.Vote(isUp) { + internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard) + rebalance = true + } + } + + if rebalance { + c.rebalance() + } + } +} + +// rebalance removes dead shards from the Ring. 
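Vote above is the entire shard health model: three consecutive failed heartbeats take a shard out of rotation, and a single success puts it back. A standalone re-implementation of just that counter, for illustration only, since ringShard itself is unexported:

package main

import (
	"fmt"
	"sync/atomic"
)

// shard mirrors the voting logic above: a shard is considered down only
// after three consecutive failed checks; one success resets the counter.
type shard struct{ down int32 }

func (s *shard) isDown() bool { return atomic.LoadInt32(&s.down) >= 3 }

func (s *shard) vote(up bool) bool {
	if up {
		changed := s.isDown()
		atomic.StoreInt32(&s.down, 0)
		return changed
	}
	if s.isDown() {
		return false // already down, no further state change
	}
	atomic.AddInt32(&s.down, 1)
	return s.isDown()
}

func main() {
	var s shard
	// Only the third consecutive failure reports a state change.
	fmt.Println(s.vote(false), s.vote(false), s.vote(false)) // false false true
}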
+func (c *ringShards) rebalance() { + c.mu.RLock() + shards := c.shards + c.mu.RUnlock() + + liveShards := make([]string, 0, len(shards)) + + for name, shard := range shards { + if shard.IsUp() { + liveShards = append(liveShards, name) + } + } + + hash := c.opt.NewConsistentHash(liveShards) + + c.mu.Lock() + c.hash = hash + c.numShard = len(liveShards) + c.mu.Unlock() +} + +func (c *ringShards) Len() int { + c.mu.RLock() + l := c.numShard + c.mu.RUnlock() + return l +} + +func (c *ringShards) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil + } + c.closed = true + + var firstErr error + for _, shard := range c.shards { + if err := shard.Client.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + c.hash = nil + c.shards = nil + c.list = nil + + return firstErr +} + +//------------------------------------------------------------------------------ + +type ring struct { + opt *RingOptions + shards *ringShards + cmdsInfoCache *cmdsInfoCache //nolint:structcheck +} + +// Ring is a Redis client that uses consistent hashing to distribute +// keys across multiple Redis servers (shards). It's safe for +// concurrent use by multiple goroutines. +// +// Ring monitors the state of each shard and removes dead shards from +// the ring. When a shard comes online it is added back to the ring. This +// gives you maximum availability and partition tolerance, but no +// consistency between different shards or even clients. Each client +// uses shards that are available to the client and does not do any +// coordination when shard state is changed. +// +// Ring should be used when you need multiple Redis servers for caching +// and can tolerate losing data when one of the servers dies. +// Otherwise you should use Redis Cluster. +type Ring struct { + *ring + cmdable + hooks + ctx context.Context +} + +func NewRing(opt *RingOptions) *Ring { + opt.init() + + ring := Ring{ + ring: &ring{ + opt: opt, + shards: newRingShards(opt), + }, + ctx: context.Background(), + } + + ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo) + ring.cmdable = ring.Process + + go ring.shards.Heartbeat(opt.HeartbeatFrequency) + + return &ring +} + +func (c *Ring) Context() context.Context { + return c.ctx +} + +func (c *Ring) WithContext(ctx context.Context) *Ring { + if ctx == nil { + panic("nil context") + } + clone := *c + clone.cmdable = clone.Process + clone.hooks.lock() + clone.ctx = ctx + return &clone +} + +// Do creates a Cmd from the args and processes the cmd. +func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd { + cmd := NewCmd(ctx, args...) + _ = c.Process(ctx, cmd) + return cmd +} + +func (c *Ring) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.process) +} + +// Options returns read-only Options that were used to create the client. +func (c *Ring) Options() *RingOptions { + return c.opt +} + +func (c *Ring) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +// PoolStats returns accumulated connection pool stats. +func (c *Ring) PoolStats() *PoolStats { + shards := c.shards.List() + var acc PoolStats + for _, shard := range shards { + s := shard.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + } + return &acc +} + +// Len returns the current number of shards in the ring. 
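NewConsistentHash is a plug-in point, with rendezvous hashing over the live shard names as the default. A deliberately naive replacement, only to show the shape of the interface; unlike rendezvous hashing, a modulo scheme like this remaps most keys whenever a shard joins or leaves, so it is illustrative rather than recommended:

package main

import (
	"sort"

	"github.com/go-redis/redis/v8"
)

// moduloHash satisfies redis.ConsistentHash with a trivial byte-sum hash.
type moduloHash struct{ shards []string }

func (h moduloHash) Get(key string) string {
	if len(h.shards) == 0 {
		return ""
	}
	sum := 0
	for i := 0; i < len(key); i++ {
		sum += int(key[i])
	}
	return h.shards[sum%len(h.shards)]
}

func newModuloHash(shards []string) redis.ConsistentHash {
	s := append([]string(nil), shards...)
	sort.Strings(s) // stable order so equal inputs hash alike across rebuilds
	return moduloHash{shards: s}
}

func main() {
	_ = redis.NewRing(&redis.RingOptions{
		Addrs:             map[string]string{"a": "localhost:7000"}, // placeholder
		NewConsistentHash: newModuloHash,
	})
}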
+func (c *Ring) Len() int { + return c.shards.Len() +} + +// Subscribe subscribes the client to the specified channels. +func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub { + if len(channels) == 0 { + panic("at least one channel is required") + } + + shard, err := c.shards.GetByKey(channels[0]) + if err != nil { + // TODO: return PubSub with sticky error + panic(err) + } + return shard.Client.Subscribe(ctx, channels...) +} + +// PSubscribe subscribes the client to the given patterns. +func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub { + if len(channels) == 0 { + panic("at least one channel is required") + } + + shard, err := c.shards.GetByKey(channels[0]) + if err != nil { + // TODO: return PubSub with sticky error + panic(err) + } + return shard.Client.PSubscribe(ctx, channels...) +} + +// ForEachShard concurrently calls the fn on each live shard in the ring. +// It returns the first error if any. +func (c *Ring) ForEachShard( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + shards := c.shards.List() + var wg sync.WaitGroup + errCh := make(chan error, 1) + for _, shard := range shards { + if shard.IsDown() { + continue + } + + wg.Add(1) + go func(shard *ringShard) { + defer wg.Done() + err := fn(ctx, shard.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(shard) + } + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { + shards := c.shards.List() + var firstErr error + for _, shard := range shards { + cmdsInfo, err := shard.Client.Command(ctx).Result() + if err == nil { + return cmdsInfo, nil + } + if firstErr == nil { + firstErr = err + } + } + if firstErr == nil { + return nil, errRingShardsDown + } + return nil, firstErr +} + +func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo { + cmdsInfo, err := c.cmdsInfoCache.Get(ctx) + if err != nil { + return nil + } + info := cmdsInfo[name] + if info == nil { + internal.Logger.Printf(ctx, "info for cmd=%s not found", name) + } + return info +} + +func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) { + cmdInfo := c.cmdInfo(ctx, cmd.Name()) + pos := cmdFirstKeyPos(cmd, cmdInfo) + if pos == 0 { + return c.shards.Random() + } + firstKey := cmd.stringArg(pos) + return c.shards.GetByKey(firstKey) +} + +func (c *Ring) process(ctx context.Context, cmd Cmder) error { + var lastErr error + for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + shard, err := c.cmdShard(ctx, cmd) + if err != nil { + return err + } + + lastErr = shard.Client.Process(ctx, cmd) + if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) { + return lastErr + } + } + return lastErr +} + +func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(ctx, fn) +} + +func (c *Ring) Pipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processPipeline, + } + pipe.init() + return &pipe +} + +func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + return c.generalProcessPipeline(ctx, cmds, false) + }) +} + +func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, 
error) { + return c.TxPipeline().Pipelined(ctx, fn) +} + +func (c *Ring) TxPipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processTxPipeline, + } + pipe.init() + return &pipe +} + +func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + return c.generalProcessPipeline(ctx, cmds, true) + }) +} + +func (c *Ring) generalProcessPipeline( + ctx context.Context, cmds []Cmder, tx bool, +) error { + cmdsMap := make(map[string][]Cmder) + for _, cmd := range cmds { + cmdInfo := c.cmdInfo(ctx, cmd.Name()) + hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo)) + if hash != "" { + hash = c.shards.Hash(hash) + } + cmdsMap[hash] = append(cmdsMap[hash], cmd) + } + + var wg sync.WaitGroup + for hash, cmds := range cmdsMap { + wg.Add(1) + go func(hash string, cmds []Cmder) { + defer wg.Done() + + _ = c.processShardPipeline(ctx, hash, cmds, tx) + }(hash, cmds) + } + + wg.Wait() + return cmdsFirstErr(cmds) +} + +func (c *Ring) processShardPipeline( + ctx context.Context, hash string, cmds []Cmder, tx bool, +) error { + // TODO: retry? + shard, err := c.shards.GetByName(hash) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + if tx { + return shard.Client.processTxPipeline(ctx, cmds) + } + return shard.Client.processPipeline(ctx, cmds) +} + +func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { + if len(keys) == 0 { + return fmt.Errorf("redis: Watch requires at least one key") + } + + var shards []*ringShard + for _, key := range keys { + if key != "" { + shard, err := c.shards.GetByKey(hashtag.Key(key)) + if err != nil { + return err + } + + shards = append(shards, shard) + } + } + + if len(shards) == 0 { + return fmt.Errorf("redis: Watch requires at least one shard") + } + + if len(shards) > 1 { + for _, shard := range shards[1:] { + if shard.Client != shards[0].Client { + err := fmt.Errorf("redis: Watch requires all keys to be in the same shard") + return err + } + } + } + + return shards[0].Client.Watch(ctx, fn, keys...) +} + +// Close closes the ring client, releasing any open resources. +// +// It is rare to Close a Ring, as the Ring is meant to be long-lived +// and shared between many goroutines. 
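generalProcessPipeline above is what makes pipelines work on a ring: commands are bucketed by shard hash and each bucket is flushed concurrently. A usage sketch with a placeholder shard address:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{"shard1": "localhost:7000"}, // placeholder
	})
	defer rdb.Close()

	// Both INCRs are queued client-side, grouped per shard, and sent in
	// one round trip per shard.
	cmds, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
		pipe.Incr(ctx, "counter:a")
		pipe.Incr(ctx, "counter:b")
		return nil
	})
	fmt.Println(len(cmds), err)
}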
+func (c *Ring) Close() error {
+	return c.shards.Close()
+}
diff --git a/vendor/github.com/go-redis/redis/v8/script.go b/vendor/github.com/go-redis/redis/v8/script.go
new file mode 100644
index 00000000..5cab18d6
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/script.go
@@ -0,0 +1,65 @@
+package redis
+
+import (
+	"context"
+	"crypto/sha1"
+	"encoding/hex"
+	"io"
+	"strings"
+)
+
+type Scripter interface {
+	Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+	EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+	ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+	ScriptLoad(ctx context.Context, script string) *StringCmd
+}
+
+var (
+	_ Scripter = (*Client)(nil)
+	_ Scripter = (*Ring)(nil)
+	_ Scripter = (*ClusterClient)(nil)
+)
+
+type Script struct {
+	src, hash string
+}
+
+func NewScript(src string) *Script {
+	h := sha1.New()
+	_, _ = io.WriteString(h, src)
+	return &Script{
+		src:  src,
+		hash: hex.EncodeToString(h.Sum(nil)),
+	}
+}
+
+func (s *Script) Hash() string {
+	return s.hash
+}
+
+func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
+	return c.ScriptLoad(ctx, s.src)
+}
+
+func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
+	return c.ScriptExists(ctx, s.hash)
+}
+
+func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+	return c.Eval(ctx, s.src, keys, args...)
+}
+
+func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+	return c.EvalSha(ctx, s.hash, keys, args...)
+}
+
+// Run optimistically uses EVALSHA to run the script. If the script does not
+// exist it is retried using EVAL.
+func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+	r := s.EvalSha(ctx, c, keys, args...)
+	if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+		return s.Eval(ctx, c, keys, args...)
+	}
+	return r
+}
diff --git a/vendor/github.com/go-redis/redis/v8/sentinel.go b/vendor/github.com/go-redis/redis/v8/sentinel.go
new file mode 100644
index 00000000..ec6221dc
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/sentinel.go
@@ -0,0 +1,796 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/rand"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+	// The master name.
+	MasterName string
+	// A seed list of host:port addresses of sentinel nodes.
+	SentinelAddrs []string
+
+	// If specified with SentinelPassword, enables ACL-based authentication (via
+	// AUTH <user> <pass>).
+	SentinelUsername string
+	// Sentinel password from "requirepass <password>" (if enabled) in Sentinel
+	// configuration, or, if SentinelUsername is also supplied, used for ACL-based
+	// authentication.
+	SentinelPassword string
+
+	// Allows routing read-only commands to the closest master or slave node.
+	// This option only works with NewFailoverClusterClient.
+	RouteByLatency bool
+	// Allows routing read-only commands to the random master or slave node.
+	// This option only works with NewFailoverClusterClient.
+	RouteRandomly bool
+
+	// Route all commands to slave read-only nodes.
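Script.Run above implements the usual lazy-loading workflow for Lua scripts: try EVALSHA first, fall back to EVAL on NOSCRIPT so the script is loaded on demand. A sketch; the server address and key name are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// A tiny Lua script; its SHA1 is computed once by NewScript.
var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // placeholder

	// The first call falls back to EVAL (NOSCRIPT); later calls hit EVALSHA.
	n, err := incrBy.Run(ctx, rdb, []string{"counter"}, 5).Int()
	fmt.Println(n, err)
}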
+ SlaveOnly bool + + // Use slaves disconnected with master when cannot get connected slaves + // Now, this option only works in RandomSlaveAddr function. + UseDisconnectedSlaves bool + + // Following options are copied from Options struct. + + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) + OnConnect func(ctx context.Context, cn *Conn) error + + Username string + Password string + DB int + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). + PoolFIFO bool + + PoolSize int + MinIdleConns int + MaxConnAge time.Duration + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + + TLSConfig *tls.Config +} + +func (opt *FailoverOptions) clientOptions() *Options { + return &Options{ + Addr: "FailoverClient", + + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + + DB: opt.DB, + Username: opt.Username, + Password: opt.Password, + + MaxRetries: opt.MaxRetries, + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: opt.IdleCheckFrequency, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + + TLSConfig: opt.TLSConfig, + } +} + +func (opt *FailoverOptions) sentinelOptions(addr string) *Options { + return &Options{ + Addr: addr, + + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + + DB: 0, + Username: opt.SentinelUsername, + Password: opt.SentinelPassword, + + MaxRetries: opt.MaxRetries, + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: opt.IdleCheckFrequency, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + + TLSConfig: opt.TLSConfig, + } +} + +func (opt *FailoverOptions) clusterOptions() *ClusterOptions { + return &ClusterOptions{ + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + + Username: opt.Username, + Password: opt.Password, + + MaxRedirects: opt.MaxRetries, + + RouteByLatency: opt.RouteByLatency, + RouteRandomly: opt.RouteRandomly, + + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: opt.IdleCheckFrequency, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + + TLSConfig: opt.TLSConfig, + } +} + +// NewFailoverClient returns a Redis client that uses Redis Sentinel +// for automatic failover. It's safe for concurrent use by multiple +// goroutines. 
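A minimal sketch of NewFailoverClient in use; the master name and sentinel addresses are placeholders for a real deployment:

package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",                                     // placeholder
		SentinelAddrs: []string{"localhost:26379", "localhost:26380"}, // placeholders
	})
	defer rdb.Close()

	// Commands always reach the current master; on +switch-master the
	// pool drops connections to the old one (see the onFailover wiring below).
	_ = rdb.Set(ctx, "key", "value", 0).Err()
}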
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client { + if failoverOpt.RouteByLatency { + panic("to route commands by latency, use NewFailoverClusterClient") + } + if failoverOpt.RouteRandomly { + panic("to route commands randomly, use NewFailoverClusterClient") + } + + sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs)) + copy(sentinelAddrs, failoverOpt.SentinelAddrs) + + rand.Shuffle(len(sentinelAddrs), func(i, j int) { + sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i] + }) + + failover := &sentinelFailover{ + opt: failoverOpt, + sentinelAddrs: sentinelAddrs, + } + + opt := failoverOpt.clientOptions() + opt.Dialer = masterSlaveDialer(failover) + opt.init() + + connPool := newConnPool(opt) + + failover.mu.Lock() + failover.onFailover = func(ctx context.Context, addr string) { + _ = connPool.Filter(func(cn *pool.Conn) bool { + return cn.RemoteAddr().String() != addr + }) + } + failover.mu.Unlock() + + c := Client{ + baseClient: newBaseClient(opt, connPool), + ctx: context.Background(), + } + c.cmdable = c.Process + c.onClose = failover.Close + + return &c +} + +func masterSlaveDialer( + failover *sentinelFailover, +) func(ctx context.Context, network, addr string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { + var addr string + var err error + + if failover.opt.SlaveOnly { + addr, err = failover.RandomSlaveAddr(ctx) + } else { + addr, err = failover.MasterAddr(ctx) + if err == nil { + failover.trySwitchMaster(ctx, addr) + } + } + if err != nil { + return nil, err + } + if failover.opt.Dialer != nil { + return failover.opt.Dialer(ctx, network, addr) + } + + netDialer := &net.Dialer{ + Timeout: failover.opt.DialTimeout, + KeepAlive: 5 * time.Minute, + } + if failover.opt.TLSConfig == nil { + return netDialer.DialContext(ctx, network, addr) + } + return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig) + } +} + +//------------------------------------------------------------------------------ + +// SentinelClient is a client for a Redis Sentinel. +type SentinelClient struct { + *baseClient + hooks + ctx context.Context +} + +func NewSentinelClient(opt *Options) *SentinelClient { + opt.init() + c := &SentinelClient{ + baseClient: &baseClient{ + opt: opt, + connPool: newConnPool(opt), + }, + ctx: context.Background(), + } + return c +} + +func (c *SentinelClient) Context() context.Context { + return c.ctx +} + +func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient { + if ctx == nil { + panic("nil context") + } + clone := *c + clone.ctx = ctx + return &clone +} + +func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.baseClient.process) +} + +func (c *SentinelClient) pubSub() *PubSub { + pubsub := &PubSub{ + opt: c.opt, + + newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { + return c.newConn(ctx) + }, + closeConn: c.connPool.CloseConn, + } + pubsub.init() + return pubsub +} + +// Ping is used to test if a connection is still alive, or to +// measure latency. +func (c *SentinelClient) Ping(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "ping") + _ = c.Process(ctx, cmd) + return cmd +} + +// Subscribe subscribes the client to the specified channels. +// Channels can be omitted to create empty subscription. 
+func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.Subscribe(ctx, channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.PSubscribe(ctx, channels...) + } + return pubsub +} + +func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name) + _ = c.Process(ctx, cmd) + return cmd +} + +func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd { + cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name) + _ = c.Process(ctx, cmd) + return cmd +} + +// Failover forces a failover as if the master was not reachable, and without +// asking for agreement to other Sentinels. +func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd { + cmd := NewStatusCmd(ctx, "sentinel", "failover", name) + _ = c.Process(ctx, cmd) + return cmd +} + +// Reset resets all the masters with matching name. The pattern argument is a +// glob-style pattern. The reset process clears any previous state in a master +// (including a failover in progress), and removes every slave and sentinel +// already discovered and associated with the master. +func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd { + cmd := NewIntCmd(ctx, "sentinel", "reset", pattern) + _ = c.Process(ctx, cmd) + return cmd +} + +// FlushConfig forces Sentinel to rewrite its configuration on disk, including +// the current Sentinel state. +func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "sentinel", "flushconfig") + _ = c.Process(ctx, cmd) + return cmd +} + +// Master shows the state and info of the specified master. +func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd { + cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name) + _ = c.Process(ctx, cmd) + return cmd +} + +// Masters shows a list of monitored masters and their state. +func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd { + cmd := NewSliceCmd(ctx, "sentinel", "masters") + _ = c.Process(ctx, cmd) + return cmd +} + +// Slaves shows a list of slaves for the specified master and their state. +func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd { + cmd := NewSliceCmd(ctx, "sentinel", "slaves", name) + _ = c.Process(ctx, cmd) + return cmd +} + +// CkQuorum checks if the current Sentinel configuration is able to reach the +// quorum needed to failover a master, and the majority needed to authorize the +// failover. This command should be used in monitoring systems to check if a +// Sentinel deployment is ok. +func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd { + cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name) + _ = c.Process(ctx, cmd) + return cmd +} + +// Monitor tells the Sentinel to start monitoring a new master with the specified +// name, ip, port, and quorum. 
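The SentinelClient methods above map one-to-one onto SENTINEL subcommands, so the client can also be used directly for discovery and administration. A sketch with placeholder names:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	sc := redis.NewSentinelClient(&redis.Options{
		Addr: "localhost:26379", // placeholder sentinel address
	})
	defer sc.Close()

	// Returns a []string{ip, port} pair for the monitored master.
	addr, err := sc.GetMasterAddrByName(ctx, "mymaster").Result()
	fmt.Println(addr, err)
}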
+func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd { + cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum) + _ = c.Process(ctx, cmd) + return cmd +} + +// Set is used in order to change configuration parameters of a specific master. +func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd { + cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value) + _ = c.Process(ctx, cmd) + return cmd +} + +// Remove is used in order to remove the specified master: the master will no +// longer be monitored, and will totally be removed from the internal state of +// the Sentinel. +func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd { + cmd := NewStringCmd(ctx, "sentinel", "remove", name) + _ = c.Process(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +type sentinelFailover struct { + opt *FailoverOptions + + sentinelAddrs []string + + onFailover func(ctx context.Context, addr string) + onUpdate func(ctx context.Context) + + mu sync.RWMutex + _masterAddr string + sentinel *SentinelClient + pubsub *PubSub +} + +func (c *sentinelFailover) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.sentinel != nil { + return c.closeSentinel() + } + return nil +} + +func (c *sentinelFailover) closeSentinel() error { + firstErr := c.pubsub.Close() + c.pubsub = nil + + err := c.sentinel.Close() + if err != nil && firstErr == nil { + firstErr = err + } + c.sentinel = nil + + return firstErr +} + +func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) { + if c.opt == nil { + return "", errors.New("opt is nil") + } + + addresses, err := c.slaveAddrs(ctx, false) + if err != nil { + return "", err + } + + if len(addresses) == 0 && c.opt.UseDisconnectedSlaves { + addresses, err = c.slaveAddrs(ctx, true) + if err != nil { + return "", err + } + } + + if len(addresses) == 0 { + return c.MasterAddr(ctx) + } + return addresses[rand.Intn(len(addresses))], nil +} + +func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) { + c.mu.RLock() + sentinel := c.sentinel + c.mu.RUnlock() + + if sentinel != nil { + addr := c.getMasterAddr(ctx, sentinel) + if addr != "" { + return addr, nil + } + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.sentinel != nil { + addr := c.getMasterAddr(ctx, c.sentinel) + if addr != "" { + return addr, nil + } + _ = c.closeSentinel() + } + + for i, sentinelAddr := range c.sentinelAddrs { + sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr)) + + masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result() + if err != nil { + internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s", + c.opt.MasterName, err) + _ = sentinel.Close() + continue + } + + // Push working sentinel to the top. 
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0] + c.setSentinel(ctx, sentinel) + + addr := net.JoinHostPort(masterAddr[0], masterAddr[1]) + return addr, nil + } + + return "", errors.New("redis: all sentinels specified in configuration are unreachable") +} + +func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) { + c.mu.RLock() + sentinel := c.sentinel + c.mu.RUnlock() + + if sentinel != nil { + addrs := c.getSlaveAddrs(ctx, sentinel) + if len(addrs) > 0 { + return addrs, nil + } + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.sentinel != nil { + addrs := c.getSlaveAddrs(ctx, c.sentinel) + if len(addrs) > 0 { + return addrs, nil + } + _ = c.closeSentinel() + } + + var sentinelReachable bool + + for i, sentinelAddr := range c.sentinelAddrs { + sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr)) + + slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result() + if err != nil { + internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s", + c.opt.MasterName, err) + _ = sentinel.Close() + continue + } + sentinelReachable = true + addrs := parseSlaveAddrs(slaves, useDisconnected) + if len(addrs) == 0 { + continue + } + // Push working sentinel to the top. + c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0] + c.setSentinel(ctx, sentinel) + + return addrs, nil + } + + if sentinelReachable { + return []string{}, nil + } + return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable") +} + +func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string { + addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result() + if err != nil { + internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s", + c.opt.MasterName, err) + return "" + } + return net.JoinHostPort(addr[0], addr[1]) +} + +func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string { + addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result() + if err != nil { + internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s", + c.opt.MasterName, err) + return []string{} + } + return parseSlaveAddrs(addrs, false) +} + +func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string { + nodes := make([]string, 0, len(addrs)) + for _, node := range addrs { + ip := "" + port := "" + flags := []string{} + lastkey := "" + isDown := false + + for _, key := range node.([]interface{}) { + switch lastkey { + case "ip": + ip = key.(string) + case "port": + port = key.(string) + case "flags": + flags = strings.Split(key.(string), ",") + } + lastkey = key.(string) + } + + for _, flag := range flags { + switch flag { + case "s_down", "o_down": + isDown = true + case "disconnected": + if !keepDisconnected { + isDown = true + } + } + } + + if !isDown { + nodes = append(nodes, net.JoinHostPort(ip, port)) + } + } + + return nodes +} + +func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) { + c.mu.RLock() + currentAddr := c._masterAddr //nolint:ifshort + c.mu.RUnlock() + + if addr == currentAddr { + return + } + + c.mu.Lock() + defer c.mu.Unlock() + + if addr == c._masterAddr { + return + } + c._masterAddr = addr + + internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q", + c.opt.MasterName, addr) + if c.onFailover != nil { + c.onFailover(ctx, addr) + } +} + +func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel 
*SentinelClient) { + if c.sentinel != nil { + panic("not reached") + } + c.sentinel = sentinel + c.discoverSentinels(ctx) + + c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done") + go c.listen(c.pubsub) +} + +func (c *sentinelFailover) discoverSentinels(ctx context.Context) { + sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result() + if err != nil { + internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err) + return + } + for _, sentinel := range sentinels { + vals := sentinel.([]interface{}) + var ip, port string + for i := 0; i < len(vals); i += 2 { + key := vals[i].(string) + switch key { + case "ip": + ip = vals[i+1].(string) + case "port": + port = vals[i+1].(string) + } + } + if ip != "" && port != "" { + sentinelAddr := net.JoinHostPort(ip, port) + if !contains(c.sentinelAddrs, sentinelAddr) { + internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q", + sentinelAddr, c.opt.MasterName) + c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr) + } + } + } +} + +func (c *sentinelFailover) listen(pubsub *PubSub) { + ctx := context.TODO() + + if c.onUpdate != nil { + c.onUpdate(ctx) + } + + ch := pubsub.Channel() + for msg := range ch { + if msg.Channel == "+switch-master" { + parts := strings.Split(msg.Payload, " ") + if parts[0] != c.opt.MasterName { + internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0]) + continue + } + addr := net.JoinHostPort(parts[3], parts[4]) + c.trySwitchMaster(pubsub.getContext(), addr) + } + + if c.onUpdate != nil { + c.onUpdate(ctx) + } + } +} + +func contains(slice []string, str string) bool { + for _, s := range slice { + if s == str { + return true + } + } + return false +} + +//------------------------------------------------------------------------------ + +// NewFailoverClusterClient returns a client that supports routing read-only commands +// to a slave node. +func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient { + sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs)) + copy(sentinelAddrs, failoverOpt.SentinelAddrs) + + failover := &sentinelFailover{ + opt: failoverOpt, + sentinelAddrs: sentinelAddrs, + } + + opt := failoverOpt.clusterOptions() + opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) { + masterAddr, err := failover.MasterAddr(ctx) + if err != nil { + return nil, err + } + + nodes := []ClusterNode{{ + Addr: masterAddr, + }} + + slaveAddrs, err := failover.slaveAddrs(ctx, false) + if err != nil { + return nil, err + } + + for _, slaveAddr := range slaveAddrs { + nodes = append(nodes, ClusterNode{ + Addr: slaveAddr, + }) + } + + slots := []ClusterSlot{ + { + Start: 0, + End: 16383, + Nodes: nodes, + }, + } + return slots, nil + } + + c := NewClusterClient(opt) + + failover.mu.Lock() + failover.onUpdate = func(ctx context.Context) { + c.ReloadState(ctx) + } + failover.mu.Unlock() + + return c +} diff --git a/vendor/github.com/go-redis/redis/v8/tx.go b/vendor/github.com/go-redis/redis/v8/tx.go new file mode 100644 index 00000000..8c9d8720 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/tx.go @@ -0,0 +1,149 @@ +package redis + +import ( + "context" + + "github.com/go-redis/redis/v8/internal/pool" + "github.com/go-redis/redis/v8/internal/proto" +) + +// TxFailedErr transaction redis failed. +const TxFailedErr = proto.RedisError("redis: transaction failed") + +// Tx implements Redis transactions as described in +// http://redis.io/topics/transactions. 
It's NOT safe for concurrent use +// by multiple goroutines, because Exec resets list of watched keys. +// +// If you don't need WATCH, use Pipeline instead. +type Tx struct { + baseClient + cmdable + statefulCmdable + hooks + ctx context.Context +} + +func (c *Client) newTx(ctx context.Context) *Tx { + tx := Tx{ + baseClient: baseClient{ + opt: c.opt, + connPool: pool.NewStickyConnPool(c.connPool), + }, + hooks: c.hooks.clone(), + ctx: ctx, + } + tx.init() + return &tx +} + +func (c *Tx) init() { + c.cmdable = c.Process + c.statefulCmdable = c.Process +} + +func (c *Tx) Context() context.Context { + return c.ctx +} + +func (c *Tx) WithContext(ctx context.Context) *Tx { + if ctx == nil { + panic("nil context") + } + clone := *c + clone.init() + clone.hooks.lock() + clone.ctx = ctx + return &clone +} + +func (c *Tx) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.baseClient.process) +} + +// Watch prepares a transaction and marks the keys to be watched +// for conditional execution if there are any keys. +// +// The transaction is automatically closed when fn exits. +func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { + tx := c.newTx(ctx) + defer tx.Close(ctx) + if len(keys) > 0 { + if err := tx.Watch(ctx, keys...).Err(); err != nil { + return err + } + } + return fn(tx) +} + +// Close closes the transaction, releasing any open resources. +func (c *Tx) Close(ctx context.Context) error { + _ = c.Unwatch(ctx).Err() + return c.baseClient.Close() +} + +// Watch marks the keys to be watched for conditional execution +// of a transaction. +func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "watch" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStatusCmd(ctx, args...) + _ = c.Process(ctx, cmd) + return cmd +} + +// Unwatch flushes all the previously watched keys for a transaction. +func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "unwatch" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStatusCmd(ctx, args...) + _ = c.Process(ctx, cmd) + return cmd +} + +// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined. +func (c *Tx) Pipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: func(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline) + }, + } + pipe.init() + return &pipe +} + +// Pipelined executes commands queued in the fn outside of the transaction. +// Use TxPipelined if you need transactional behavior. +func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(ctx, fn) +} + +// TxPipelined executes commands queued in the fn in the transaction. +// +// When using WATCH, EXEC will execute commands only if the watched keys +// were not modified, allowing for a check-and-set mechanism. +// +// Exec always returns list of commands. If transaction fails +// TxFailedErr is returned. Otherwise Exec returns an error of the first +// failed command or nil. +func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(ctx, fn) +} + +// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined. 
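Watch plus TxPipelined is the check-and-set idiom described above: EXEC aborts with TxFailedErr if any watched key changed, and the caller retries. A sketch of an optimistic increment; the address, key name, and retry bound are placeholders:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // placeholder

	const maxRetries = 100

	increment := func(key string) error {
		txf := func(tx *redis.Tx) error {
			n, err := tx.Get(ctx, key).Int()
			if err != nil && err != redis.Nil {
				return err
			}
			// Runs MULTI/EXEC; EXEC aborts if key changed since WATCH.
			_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
				pipe.Set(ctx, key, n+1, 0)
				return nil
			})
			return err
		}
		for i := 0; i < maxRetries; i++ {
			err := rdb.Watch(ctx, txf, key)
			if err == nil {
				return nil
			}
			if err == redis.TxFailedErr {
				continue // lost the optimistic lock, retry
			}
			return err
		}
		return errors.New("increment reached maximum retries")
	}

	fmt.Println(increment("counter"))
}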
+func (c *Tx) TxPipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: func(ctx context.Context, cmds []Cmder) error { + return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline) + }, + } + pipe.init() + return &pipe +} diff --git a/vendor/github.com/go-redis/redis/v8/universal.go b/vendor/github.com/go-redis/redis/v8/universal.go new file mode 100644 index 00000000..c89b3e5d --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/universal.go @@ -0,0 +1,215 @@ +package redis + +import ( + "context" + "crypto/tls" + "net" + "time" +) + +// UniversalOptions information is required by UniversalClient to establish +// connections. +type UniversalOptions struct { + // Either a single address or a seed list of host:port addresses + // of cluster/sentinel nodes. + Addrs []string + + // Database to be selected after connecting to the server. + // Only single-node and failover clients. + DB int + + // Common options. + + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) + OnConnect func(ctx context.Context, cn *Conn) error + + Username string + Password string + SentinelUsername string + SentinelPassword string + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). + PoolFIFO bool + + PoolSize int + MinIdleConns int + MaxConnAge time.Duration + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + + TLSConfig *tls.Config + + // Only cluster clients. + + MaxRedirects int + ReadOnly bool + RouteByLatency bool + RouteRandomly bool + + // The sentinel master name. + // Only failover clients. + + MasterName string +} + +// Cluster returns cluster options created from the universal options. +func (o *UniversalOptions) Cluster() *ClusterOptions { + if len(o.Addrs) == 0 { + o.Addrs = []string{"127.0.0.1:6379"} + } + + return &ClusterOptions{ + Addrs: o.Addrs, + Dialer: o.Dialer, + OnConnect: o.OnConnect, + + Username: o.Username, + Password: o.Password, + + MaxRedirects: o.MaxRedirects, + ReadOnly: o.ReadOnly, + RouteByLatency: o.RouteByLatency, + RouteRandomly: o.RouteRandomly, + + MaxRetries: o.MaxRetries, + MinRetryBackoff: o.MinRetryBackoff, + MaxRetryBackoff: o.MaxRetryBackoff, + + DialTimeout: o.DialTimeout, + ReadTimeout: o.ReadTimeout, + WriteTimeout: o.WriteTimeout, + PoolFIFO: o.PoolFIFO, + PoolSize: o.PoolSize, + MinIdleConns: o.MinIdleConns, + MaxConnAge: o.MaxConnAge, + PoolTimeout: o.PoolTimeout, + IdleTimeout: o.IdleTimeout, + IdleCheckFrequency: o.IdleCheckFrequency, + + TLSConfig: o.TLSConfig, + } +} + +// Failover returns failover options created from the universal options. 
+func (o *UniversalOptions) Failover() *FailoverOptions { + if len(o.Addrs) == 0 { + o.Addrs = []string{"127.0.0.1:26379"} + } + + return &FailoverOptions{ + SentinelAddrs: o.Addrs, + MasterName: o.MasterName, + + Dialer: o.Dialer, + OnConnect: o.OnConnect, + + DB: o.DB, + Username: o.Username, + Password: o.Password, + SentinelUsername: o.SentinelUsername, + SentinelPassword: o.SentinelPassword, + + MaxRetries: o.MaxRetries, + MinRetryBackoff: o.MinRetryBackoff, + MaxRetryBackoff: o.MaxRetryBackoff, + + DialTimeout: o.DialTimeout, + ReadTimeout: o.ReadTimeout, + WriteTimeout: o.WriteTimeout, + + PoolFIFO: o.PoolFIFO, + PoolSize: o.PoolSize, + MinIdleConns: o.MinIdleConns, + MaxConnAge: o.MaxConnAge, + PoolTimeout: o.PoolTimeout, + IdleTimeout: o.IdleTimeout, + IdleCheckFrequency: o.IdleCheckFrequency, + + TLSConfig: o.TLSConfig, + } +} + +// Simple returns basic options created from the universal options. +func (o *UniversalOptions) Simple() *Options { + addr := "127.0.0.1:6379" + if len(o.Addrs) > 0 { + addr = o.Addrs[0] + } + + return &Options{ + Addr: addr, + Dialer: o.Dialer, + OnConnect: o.OnConnect, + + DB: o.DB, + Username: o.Username, + Password: o.Password, + + MaxRetries: o.MaxRetries, + MinRetryBackoff: o.MinRetryBackoff, + MaxRetryBackoff: o.MaxRetryBackoff, + + DialTimeout: o.DialTimeout, + ReadTimeout: o.ReadTimeout, + WriteTimeout: o.WriteTimeout, + + PoolFIFO: o.PoolFIFO, + PoolSize: o.PoolSize, + MinIdleConns: o.MinIdleConns, + MaxConnAge: o.MaxConnAge, + PoolTimeout: o.PoolTimeout, + IdleTimeout: o.IdleTimeout, + IdleCheckFrequency: o.IdleCheckFrequency, + + TLSConfig: o.TLSConfig, + } +} + +// -------------------------------------------------------------------- + +// UniversalClient is an abstract client which - based on the provided options - +// represents either a ClusterClient, a FailoverClient, or a single-node Client. +// This can be useful for testing cluster-specific applications locally or having different +// clients in different environments. +type UniversalClient interface { + Cmdable + Context() context.Context + AddHook(Hook) + Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error + Do(ctx context.Context, args ...interface{}) *Cmd + Process(ctx context.Context, cmd Cmder) error + Subscribe(ctx context.Context, channels ...string) *PubSub + PSubscribe(ctx context.Context, channels ...string) *PubSub + Close() error + PoolStats() *PoolStats +} + +var ( + _ UniversalClient = (*Client)(nil) + _ UniversalClient = (*ClusterClient)(nil) + _ UniversalClient = (*Ring)(nil) +) + +// NewUniversalClient returns a new multi client. The type of the returned client depends +// on the following conditions: +// +// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned. +// 2. if the number of Addrs is two or more, a ClusterClient is returned. +// 3. Otherwise, a single-node Client is returned. +func NewUniversalClient(opts *UniversalOptions) UniversalClient { + if opts.MasterName != "" { + return NewFailoverClient(opts.Failover()) + } else if len(opts.Addrs) > 1 { + return NewClusterClient(opts.Cluster()) + } + return NewClient(opts.Simple()) +} diff --git a/vendor/github.com/go-redis/redis/v8/version.go b/vendor/github.com/go-redis/redis/v8/version.go new file mode 100644 index 00000000..112c9a2d --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/version.go @@ -0,0 +1,6 @@ +package redis + +// Version is the current release version. 
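NewUniversalClient is pure dispatch on the options, so one call site can produce any of the three client shapes. A sketch with placeholder addresses:

package main

import (
	"github.com/go-redis/redis/v8"
)

func main() {
	// One address, no MasterName -> plain single-node Client.
	single := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"localhost:6379"}, // placeholder
	})
	defer single.Close()

	// MasterName set -> sentinel-backed failover client.
	failover := redis.NewUniversalClient(&redis.UniversalOptions{
		MasterName: "mymaster",                  // placeholder
		Addrs:      []string{"localhost:26379"}, // sentinel seed list
	})
	defer failover.Close()

	// Two or more addresses -> ClusterClient.
	cluster := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"localhost:7000", "localhost:7001"}, // placeholders
	})
	defer cluster.Close()
}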
+func Version() string { + return "8.11.5" +} diff --git a/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go new file mode 100644 index 00000000..e8134ec8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go @@ -0,0 +1,1435 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" +) + +const secondInNanos = int64(time.Second / time.Nanosecond) +const maxSecondsInDuration = 315576000000 + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). 
+ AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type isWkt interface { + XXX_WellKnownType() string +} + +var ( + wktType = reflect.TypeOf((*isWkt)(nil)).Elem() + messageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// marshalObject writes a struct to the Writer. 
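Marshaler is plain configuration. A usage sketch; it marshals a gogo well-known type (types.Duration, from the sibling types package) so no generated message is needed, and the expected output follows the Duration special-casing shown later in this file:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/jsonpb"
	"github.com/gogo/protobuf/types"
)

func main() {
	m := &jsonpb.Marshaler{
		EmitDefaults: true, // render zero-valued fields too
		Indent:       "  ", // pretty-print nested objects
	}

	// Well-known types get the special JSON mapping: a Duration becomes
	// a decimal string with an "s" suffix rather than an object.
	s, err := m.MarshalToString(&types.Duration{Seconds: 90, Nanos: 500000000})
	fmt.Println(s, err) // "90.500s" <nil>
}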
+func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if m.Indent != "" { + b, err = json.MarshalIndent(js, indent, m.Indent) + } else { + b, err = json.Marshal(js) + } + if err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(isWkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + s, ns := s.Field(0).Int(), s.Field(1).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + f := "%d.%09d" + if ns < 0 { + ns = -ns + if s == 0 { + f = "-%d.%09d" + } + } + x := fmt.Sprintf(f, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. 
+			return m.marshalValue(out, &proto.Properties{}, x, indent)
+		}
+	}
+
+	out.write("{")
+	if m.Indent != "" {
+		out.write("\n")
+	}
+
+	firstField := true
+
+	if typeURL != "" {
+		if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	for i := 0; i < s.NumField(); i++ {
+		value := s.Field(i)
+		valueField := s.Type().Field(i)
+		if strings.HasPrefix(valueField.Name, "XXX_") {
+			continue
+		}
+
+		// this is not a protobuf field
+		if valueField.Tag.Get("protobuf") == "" && valueField.Tag.Get("protobuf_oneof") == "" {
+			continue
+		}
+
+		// IsNil will panic on most value kinds.
+		switch value.Kind() {
+		case reflect.Chan, reflect.Func, reflect.Interface:
+			if value.IsNil() {
+				continue
+			}
+		}
+
+		if !m.EmitDefaults {
+			switch value.Kind() {
+			case reflect.Bool:
+				if !value.Bool() {
+					continue
+				}
+			case reflect.Int32, reflect.Int64:
+				if value.Int() == 0 {
+					continue
+				}
+			case reflect.Uint32, reflect.Uint64:
+				if value.Uint() == 0 {
+					continue
+				}
+			case reflect.Float32, reflect.Float64:
+				if value.Float() == 0 {
+					continue
+				}
+			case reflect.String:
+				if value.Len() == 0 {
+					continue
+				}
+			case reflect.Map, reflect.Ptr, reflect.Slice:
+				if value.IsNil() {
+					continue
+				}
+			}
+		}
+
+		// Oneof fields need special handling.
+		if valueField.Tag.Get("protobuf_oneof") != "" {
+			// value is an interface containing &T{real_value}.
+			sv := value.Elem().Elem() // interface -> *T -> T
+			value = sv.Field(0)
+			valueField = sv.Type().Field(0)
+		}
+		prop := jsonProperties(valueField, m.OrigName)
+		if !firstField {
+			m.writeSep(out)
+		}
+		// If the map value is a cast type, it may not implement proto.Message, therefore
+		// allow the struct tag to declare the underlying message type. The properties of
+		// the child types are adjusted, using CustomType as a carrier; the CastType
+		// property is currently not used in JSON encoding.
+		if value.Kind() == reflect.Map {
+			if tag := valueField.Tag.Get("protobuf"); tag != "" {
+				for _, v := range strings.Split(tag, ",") {
+					if !strings.HasPrefix(v, "castvaluetype=") {
+						continue
+					}
+					v = strings.TrimPrefix(v, "castvaluetype=")
+					prop.MapValProp.CustomType = v
+					break
+				}
+			}
+		}
+		if err := m.marshalField(out, prop, value, indent); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	// Handle proto2 extensions.
+	if ep, ok := v.(proto.Message); ok {
+		extensions := proto.RegisteredExtensions(v)
+		// Sort extensions for stable output.
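+		// (RegisteredExtensions returns a map, and Go map iteration order is
+		// randomized, so collect the ids and sort them first.)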
+ ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(isWkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + + v = reflect.Indirect(v) + + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + + // Handle repeated elements. 
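+	// A slice whose element type is not uint8 is rendered as a JSON array;
+	// []byte falls through to the encoding/json default at the bottom, which
+	// base64-encodes it.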
+	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+		out.write("[")
+		comma := ""
+		for i := 0; i < v.Len(); i++ {
+			sliceVal := v.Index(i)
+			out.write(comma)
+			if m.Indent != "" {
+				out.write("\n")
+				out.write(indent)
+				out.write(m.Indent)
+				out.write(m.Indent)
+			}
+			if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
+				return err
+			}
+			comma = ","
+		}
+		if m.Indent != "" {
+			out.write("\n")
+			out.write(indent)
+			out.write(m.Indent)
+		}
+		out.write("]")
+		return out.err
+	}
+
+	// Handle well-known types.
+	// Most are handled up in marshalObject (because 99% are messages).
+	if v.Type().Implements(wktType) {
+		wkt := v.Interface().(isWkt)
+		switch wkt.XXX_WellKnownType() {
+		case "NullValue":
+			out.write("null")
+			return out.err
+		}
+	}
+
+	if t, ok := v.Interface().(time.Time); ok {
+		ts, err := types.TimestampProto(t)
+		if err != nil {
+			return err
+		}
+		return m.marshalValue(out, prop, reflect.ValueOf(ts), indent)
+	}
+
+	if d, ok := v.Interface().(time.Duration); ok {
+		dur := types.DurationProto(d)
+		return m.marshalValue(out, prop, reflect.ValueOf(dur), indent)
+	}
+
+	// Handle enumerations.
+	if !m.EnumsAsInts && prop.Enum != "" {
+		// Unknown enum values are stringified by the proto library as their
+		// value. Such values should _not_ be quoted or they will be interpreted
+		// as an enum string instead of their value.
+		enumStr := v.Interface().(fmt.Stringer).String()
+		var valStr string
+		if v.Kind() == reflect.Ptr {
+			valStr = strconv.Itoa(int(v.Elem().Int()))
+		} else {
+			valStr = strconv.Itoa(int(v.Int()))
+		}
+
+		if m, ok := v.Interface().(interface {
+			MarshalJSON() ([]byte, error)
+		}); ok {
+			data, err := m.MarshalJSON()
+			if err != nil {
+				return err
+			}
+			enumStr = string(data)
+			enumStr, err = strconv.Unquote(enumStr)
+			if err != nil {
+				return err
+			}
+		}
+
+		isKnownEnum := enumStr != valStr
+
+		if isKnownEnum {
+			out.write(`"`)
+		}
+		out.write(enumStr)
+		if isKnownEnum {
+			out.write(`"`)
+		}
+		return out.err
+	}
+
+	// Handle nested messages.
+	if v.Kind() == reflect.Struct {
+		i := v
+		if v.CanAddr() {
+			i = v.Addr()
+		} else {
+			i = reflect.New(v.Type())
+			i.Elem().Set(v)
+		}
+		iface := i.Interface()
+		if iface == nil {
+			out.write(`null`)
+			return out.err
+		}
+
+		if m, ok := v.Interface().(interface {
+			MarshalJSON() ([]byte, error)
+		}); ok {
+			data, err := m.MarshalJSON()
+			if err != nil {
+				return err
+			}
+			out.write(string(data))
+			return nil
+		}
+
+		pm, ok := iface.(proto.Message)
+		if !ok {
+			if prop.CustomType == "" {
+				return fmt.Errorf("%v does not implement proto.Message", v.Type())
+			}
+			t := proto.MessageType(prop.CustomType)
+			if t == nil || !i.Type().ConvertibleTo(t) {
+				return fmt.Errorf("%v declared custom type %s but it is not convertible to %v", v.Type(), prop.CustomType, t)
+			}
+			pm = i.Convert(t).Interface().(proto.Message)
+		}
+		return m.marshalObject(out, pm, indent+m.Indent, "")
+	}
+
+	// Handle maps.
+	// Since Go randomizes map iteration, we sort keys for stable output.
+	if v.Kind() == reflect.Map {
+		out.write(`{`)
+		keys := v.MapKeys()
+		sort.Sort(mapKeys(keys))
+		for i, k := range keys {
+			if i > 0 {
+				out.write(`,`)
+			}
+			if m.Indent != "" {
+				out.write("\n")
+				out.write(indent)
+				out.write(m.Indent)
+				out.write(m.Indent)
+			}
+
+			// TODO handle map key prop properly
+			b, err := json.Marshal(k.Interface())
+			if err != nil {
+				return err
+			}
+			s := string(b)
+
+			// If the JSON is not a string value, encode it again to make it one.
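+			// (JSON object keys must be strings, so e.g. the int32 key 42 becomes "42".)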
+ if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + vprop := prop + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. 
+func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&types.Value{}) && !isJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(isWkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. + // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(isWkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, uerr := json.Marshal(jsonFields) + if uerr != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", uerr) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + t, err := time.Parse(time.RFC3339Nano, 
unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + target.Field(0).Set(reflect.ValueOf(map[string]*types.Value{})) + for k, jv := range m { + pv := &types.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*types.Value, len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&types.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_NumberValue{NumberValue: v})) + } else if v, err := unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_StringValue{StringValue: v})) + } else if v, err := strconv.ParseBool(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_BoolValue{BoolValue: v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &types.ListValue{} + target.Field(0).Set(reflect.ValueOf(&types.Value_ListValue{ListValue: lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &types.Struct{} + target.Field(0).Set(reflect.ValueOf(&types.Value_StructValue{StructValue: sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + if t, ok := target.Addr().Interface().(*time.Time); ok { + ts := &types.Timestamp{} + if err := u.unmarshalValue(reflect.ValueOf(ts).Elem(), inputValue, prop); err != nil { + return err + } + tt, err := types.TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil + } + + if d, ok := target.Addr().Interface().(*time.Duration); ok { + dur := &types.Duration{} + if err := u.unmarshalValue(reflect.ValueOf(dur).Elem(), inputValue, prop); err != nil { + return err + } + dd, err := types.DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. 
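+		// The surrounding quotes can therefore be stripped byte-wise below.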
+ s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } + target.SetInt(int64(n)) + return nil + } + + if prop != nil && len(prop.CustomType) > 0 && target.CanAddr() { + if m, ok := target.Addr().Interface().(interface { + UnmarshalJSON([]byte) error + }); ok { + return json.Unmarshal(inputValue, m) + } + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. + var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. + if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. + var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays + if targetType.Kind() == reflect.Slice { + if targetType.Elem().Kind() == reflect.Uint8 { + outRef := reflect.New(targetType) + outVal := outRef.Interface() + //CustomType with underlying type []byte + if _, ok := outVal.(interface { + UnmarshalJSON([]byte) error + }); ok { + if err := json.Unmarshal(inputValue, outVal); err != nil { + return err + } + target.Set(outRef.Elem()) + return nil + } + // Special case for encoded bytes. Pre-go1.5 doesn't support unmarshalling + // strings into aliased []byte types. 
+ // https://github.com/golang/go/commit/4302fd0409da5e4f1d71471a6770dacdc3301197 + // https://github.com/golang/go/commit/c60707b14d6be26bf4213114d13070bff00d0b0a + var out []byte + if err := json.Unmarshal(inputValue, &out); err != nil { + return err + } + target.SetBytes(out) + return nil + } + + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + var kprop *proto.Properties + if prop != nil && prop.MapKeyProp != nil { + kprop = prop.MapKeyProp + } + if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { + return err + } + } + + if !k.Type().AssignableTo(targetType.Key()) { + k = k.Convert(targetType.Key()) + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + var vprop *proto.Properties + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := u.unmarshalValue(v, raw, vprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // integers & floats can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. 
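+// When origName is true, or when the tag declares no JSON name, the original
+// proto field name is used instead.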
+func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
+	var prop proto.Properties
+	prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
+	if origName || prop.JSONName == "" {
+		prop.JSONName = prop.OrigName
+	}
+	return &prop
+}
+
+type fieldNames struct {
+	orig, camel string
+}
+
+func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
+	opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
+	if prop.JSONName != "" {
+		opts.camel = prop.JSONName
+	}
+	return opts
+}
+
+// Writer wrapper inspired by https://blog.golang.org/errors-are-values
+type errWriter struct {
+	writer io.Writer
+	err    error
+}
+
+func (w *errWriter) write(str string) {
+	if w.err != nil {
+		return
+	}
+	_, w.err = w.writer.Write([]byte(str))
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+//
+// Numeric keys are sorted in numeric order per
+// https://developers.google.com/protocol-buffers/docs/proto#maps.
+type mapKeys []reflect.Value
+
+func (s mapKeys) Len() int      { return len(s) }
+func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s mapKeys) Less(i, j int) bool {
+	if k := s[i].Kind(); k == s[j].Kind() {
+		switch k {
+		case reflect.String:
+			return s[i].String() < s[j].String()
+		case reflect.Int32, reflect.Int64:
+			return s[i].Int() < s[j].Int()
+		case reflect.Uint32, reflect.Uint64:
+			return s[i].Uint() < s[j].Uint()
+		}
+	}
+	return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
+}
+
+// checkRequiredFields returns an error if any required field in the given proto message is not set.
+// This function is used by both Marshal and Unmarshal. While required fields only exist in a
+// proto2 message, a proto3 message can contain proto2 message(s).
+func checkRequiredFields(pb proto.Message) error {
+	// Most well-known type messages do not contain required fields. The "Any" type may contain
+	// a message that has required fields.
+	//
+	// When an Any message is being marshaled, the code will have invoked proto.Unmarshal on the
+	// Any.Value field in order to transform that into JSON, and that should have returned an
+	// error if a required field is not set in the embedded message.
+	//
+	// When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the
+	// embedded message to store the serialized message in the Any.Value field, and that should
+	// have returned an error if a required field is not set.
+	if _, ok := pb.(isWkt); ok {
+		return nil
+	}
+
+	v := reflect.ValueOf(pb)
+	// Skip message if it is not a struct pointer.
+	if v.Kind() != reflect.Ptr {
+		return nil
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return nil
+	}
+
+	for i := 0; i < v.NumField(); i++ {
+		field := v.Field(i)
+		sfield := v.Type().Field(i)
+
+		if sfield.PkgPath != "" {
+			// PkgPath is blank for exported fields; skip unexported fields.
+			continue
+		}
+
+		if strings.HasPrefix(sfield.Name, "XXX_") {
+			continue
+		}
+
+		// Oneof field is an interface implemented by wrapper structs containing the actual oneof
+		// field, i.e. an interface containing &T{real_value}.
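+		// Unwrap the wrapper struct below so the required-field checks see the
+		// concrete field and its protobuf tag.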
+ if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. + for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. + for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if v.Type().Implements(messageType) { + return checkRequiredFields(v.Interface().(proto.Message)) + } + return nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/.gitignore b/vendor/github.com/golang-jwt/jwt/v4/.gitignore new file mode 100644 index 00000000..09573e01 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin +.idea/ + diff --git a/vendor/github.com/golang-jwt/jwt/v4/LICENSE b/vendor/github.com/golang-jwt/jwt/v4/LICENSE new file mode 100644 index 00000000..35dbc252 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2012 Dave Grijalva +Copyright (c) 2021 golang-jwt maintainers + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+
diff --git a/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md
new file mode 100644
index 00000000..32966f59
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md
@@ -0,0 +1,22 @@
+## Migration Guide (v4.0.0)
+
+Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0), the import path will be:
+
+    "github.com/golang-jwt/jwt/v4"
+
+The `/v4` version will be backwards compatible with existing `v3.x.y` tags in this repo, as well as
+`github.com/dgrijalva/jwt-go`. For most users this should be a drop-in replacement; if you're having
+trouble migrating, please open an issue.
+
+You can replace all occurrences of `github.com/dgrijalva/jwt-go` or `github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually or by using tools such as `sed` or `gofmt`.
+
+And then you'd typically run:
+
+```
+go get github.com/golang-jwt/jwt/v4
+go mod tidy
+```
+
+## Older releases (before v3.2.0)
+
+The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
diff --git a/vendor/github.com/golang-jwt/jwt/v4/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md
new file mode 100644
index 00000000..30f2f2a6
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/README.md
@@ -0,0 +1,138 @@
+# jwt-go
+
+[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
+[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt/v4.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt/v4)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
+
+Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) this project adds Go module support, but maintains backwards compatibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`.
+See the [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information.
+
+> After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic.
+
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least Go 1.15. See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring that key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
+
+### Supported Go versions
+
+Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy).
+So we will support a major version of Go until there are two newer major releases.
+We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities
+which will not be fixed.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing algorithm was used and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Installation Guidelines
+
+1. To install the jwt package, you first need to have [Go](https://go.dev/doc/install) installed, then you can use the command below to add `jwt-go` as a dependency in your Go program.
+
+```sh
+go get -u github.com/golang-jwt/jwt/v4
+```
+
+2. Import it in your code:
+
+```go
+import "github.com/golang-jwt/jwt/v4"
+```
+
+## Examples
+
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-Parse-Hmac)
+* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-New-Hmac)
+* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods or key functions. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`, or provide a `jwt.Keyfunc`.
+
+A common use case would be integrating with different 3rd party signature providers, like key management services from various cloud providers or Hardware Security Modules (HSMs), or to implement additional standards.
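+
+For illustration only, here is a minimal sketch of such an extension; the `customHS256` type and the `XHS256` alg name below are made up for this sketch, and a real provider integration would call out to its signing backend instead of delegating to the built-in HMAC method:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/golang-jwt/jwt/v4"
+)
+
+// customHS256 is a hypothetical SigningMethod. It delegates to the built-in
+// HMAC-SHA256 implementation but advertises its own alg name.
+type customHS256 struct{}
+
+func (customHS256) Alg() string { return "XHS256" }
+
+func (customHS256) Sign(signingString string, key interface{}) (string, error) {
+	return jwt.SigningMethodHS256.Sign(signingString, key)
+}
+
+func (customHS256) Verify(signingString, signature string, key interface{}) error {
+	return jwt.SigningMethodHS256.Verify(signingString, signature, key)
+}
+
+func main() {
+	// Register a factory so parsers can look the method up by its alg name.
+	jwt.RegisterSigningMethod("XHS256", func() jwt.SigningMethod { return customHS256{} })
+
+	token := jwt.New(customHS256{})
+	signed, err := token.SignedString([]byte("my-secret"))
+	fmt.Println(signed, err)
+}
+```
+
+Once registered, tokens carrying `"alg": "XHS256"` can be resolved through the factory during parsing, with the usual key checks still applying.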
+
+| Extension | Purpose                                                                                                  | Repo                                       |
+| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| GCP       | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS)             | https://github.com/someone1/gcp-jwt-go     |
+| AWS       | Integrates with AWS Key Management Service, KMS                                                          | https://github.com/matelang/jwt-go-aws-kms |
+| JWKS      | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc      |
+
+*Disclaimer*: Unless otherwise specified, these integrations are maintained by third parties and should not be considered as a primary offer by any of the mentioned cloud providers.
+
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases).
+
+**BREAKING CHANGES:**
+A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. The companion project https://github.com/golang-jwt/jwe aims to provide a (very) experimental implementation of the JWE standard.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster to compute, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
+
+* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation
+* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodEd25519) (`Ed25519`) expects `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc.), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+### Troubleshooting
+
+This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types.
+
+## More
+
+Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v4).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
+
+[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version of the JWT logo, which is distributed under the terms of the [MIT License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt).
diff --git a/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md
new file mode 100644
index 00000000..b08402c3
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+As of February 2022 (and until this document is updated), the latest version `v4` is supported.
+
+## Reporting a Vulnerability
+
+If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try to be explicit, and describe steps to reproduce the security issue with code example(s).
+
+You will receive a response in a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.
+
+## Public Discussions
+
+Please avoid publicly discussing a potential security vulnerability.
+
+Let's take this offline and find a solution first; this limits the potential impact as much as possible.
+
+We appreciate your help!
diff --git a/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md
new file mode 100644
index 00000000..afbfc4e4
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md
@@ -0,0 +1,135 @@
+## `jwt-go` Version History
+
+#### 4.0.0
+
+* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`.
+
+#### 3.2.2
+
+* Starting from this release, we are adopting the policy of supporting the 2 most recent versions of Go currently available. At the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
+* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks to @thaJeztah for making us aware of that and to @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)).
+* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)).
+* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
+
+#### 3.2.1
+
+* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
+  * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
+* Fixed a type confusion issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. The initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify the API in the future.
+
+#### 3.1.0
+
+* Improvements to the `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+  * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+  * `ParseFromRequest` has been moved to the `request` subpackage and usage has changed
+  * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+  * Added `Claims` interface type to allow users to decode the claims into a custom type
+  * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+  * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+  * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+  * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+  * Added several new, more specific, validation errors to the error type bitmask
+  * Moved examples from README to executable example files
+  * Signing method registry is now thread safe
+  * Added a new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or the json parser)
+
+#### 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added a more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+  * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+  * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. The result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+  * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+  * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+  * `KeyFunc` now returns `interface{}` instead of `[]byte`
+  * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+  * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+  * Added public package global `SigningMethodHS256`
+  * Added public package global `SigningMethodHS384`
+  * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+  * Added public package global `SigningMethodRS256`
+  * Added public package global `SigningMethodRS384`
+  * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+#### 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation. No functional changes
+
+#### 1.0.1
+
+* Fixed panic if RS256 signing method was passed an invalid key
+
+#### 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods
diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go
new file mode 100644
index 00000000..364cec87
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go
@@ -0,0 +1,269 @@
+package jwt
+
+import (
+	"crypto/subtle"
+	"fmt"
+	"time"
+)
+
+// Claims must just have a Valid method that determines
+// if the token is invalid for any supported reason
+type Claims interface {
+	Valid() error
+}
+
+// RegisteredClaims are a structured version of the JWT Claims Set,
+// restricted to Registered Claim Names, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1
+//
+// This type can be used on its own, but then additional private and
+// public claims embedded in the JWT will not be parsed. The typical use case
+// therefore is to embed this in a user-defined claim type.
+//
+// See examples for how to use this with your own claim types.
+type RegisteredClaims struct {
+	// the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1
+	Issuer string `json:"iss,omitempty"`
+
+	// the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2
+	Subject string `json:"sub,omitempty"`
+
+	// the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3
+	Audience ClaimStrings `json:"aud,omitempty"`
+
+	// the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4
+	ExpiresAt *NumericDate `json:"exp,omitempty"`
+
+	// the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5
+	NotBefore *NumericDate `json:"nbf,omitempty"`
+
+	// the `iat` (Issued At) claim.
See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6 + IssuedAt *NumericDate `json:"iat,omitempty"` + + // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7 + ID string `json:"jti,omitempty"` +} + +// Valid validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c RegisteredClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if !c.VerifyExpiresAt(now, false) { + delta := now.Sub(c.ExpiresAt.Time) + vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = ErrTokenUsedBeforeIssued + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = ErrTokenNotValidYet + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *RegisteredClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). +// If req is false, it will return true, if exp is unset. +func (c *RegisteredClaims) VerifyExpiresAt(cmp time.Time, req bool) bool { + if c.ExpiresAt == nil { + return verifyExp(nil, cmp, req) + } + + return verifyExp(&c.ExpiresAt.Time, cmp, req) +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (c *RegisteredClaims) VerifyIssuedAt(cmp time.Time, req bool) bool { + if c.IssuedAt == nil { + return verifyIat(nil, cmp, req) + } + + return verifyIat(&c.IssuedAt.Time, cmp, req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (c *RegisteredClaims) VerifyNotBefore(cmp time.Time, req bool) bool { + if c.NotBefore == nil { + return verifyNbf(nil, cmp, req) + } + + return verifyNbf(&c.NotBefore.Time, cmp, req) +} + +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *RegisteredClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// StandardClaims are a structured version of the JWT Claims Set, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the +// specification exactly, since they were based on an earlier draft of the +// specification and not updated. The main difference is that they only +// support integer-based date fields and singular audiences. This might lead to +// incompatibilities with other JWT implementations. The use of this is discouraged, instead +// the newer RegisteredClaims struct should be used. +// +// Deprecated: Use RegisteredClaims instead for a forward-compatible way to access registered claims in a struct. 
+type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Valid validates time based claims "exp, iat, nbf". There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if !c.VerifyExpiresAt(now, false) { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = ErrTokenUsedBeforeIssued + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = ErrTokenNotValidYet + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud([]string{c.Audience}, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). +// If req is false, it will return true, if exp is unset. +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + if c.ExpiresAt == 0 { + return verifyExp(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.ExpiresAt, 0) + return verifyExp(&t, time.Unix(cmp, 0), req) +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + if c.IssuedAt == 0 { + return verifyIat(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.IssuedAt, 0) + return verifyIat(&t, time.Unix(cmp, 0), req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + if c.NotBefore == 0 { + return verifyNbf(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.NotBefore, 0) + return verifyNbf(&t, time.Unix(cmp, 0), req) +} + +// VerifyIssuer compares the iss claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// ----- helpers + +func verifyAud(aud []string, cmp string, required bool) bool { + if len(aud) == 0 { + return !required + } + // use a var here to keep constant time compare when looping over a number of claims + result := false + + var stringClaims string + for _, a := range aud { + if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { + result = true + } + stringClaims = stringClaims + a + } + + // case where "" is sent in one or many aud claims + if len(stringClaims) == 0 { + return !required + } + + return result +} + +func verifyExp(exp *time.Time, now time.Time, required bool) bool { + if exp == nil { + return !required + } + return now.Before(*exp) +} + +func verifyIat(iat *time.Time, now time.Time, required bool) bool { + if iat == nil { + return !required + } + return now.After(*iat) || now.Equal(*iat) +} + +func verifyNbf(nbf *time.Time, now time.Time, required bool) bool { + if nbf == nil { + return !required + } + return now.After(*nbf) || now.Equal(*nbf) +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/doc.go b/vendor/github.com/golang-jwt/jwt/v4/doc.go new file mode 100644 index 00000000..a86dc1a3 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. +package jwt diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go new file mode 100644 index 00000000..eac023fc --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go @@ -0,0 +1,142 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// SigningMethodECDSA implements the ECDSA family of signing methods. +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Verify implements token verification for the SigningMethod. 
+ +// For this verify method, key must be an *ecdsa.PublicKey +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus { + return nil + } + + return ErrECDSAVerification +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an *ecdsa.PrivateKey +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outputs (r and s) into big-endian byte arrays + // padded with zeros on the left to make sure the sizes work out. + // Output must be 2*keyBytes long. + out := make([]byte, 2*keyBytes) + r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output. + s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
+ + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go new file mode 100644 index 00000000..5700636d --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key") +) + +// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go new file mode 100644 index 00000000..07d3aacd --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go @@ -0,0 +1,85 @@ +package jwt + +import ( + "errors" + + "crypto" + "crypto/ed25519" + "crypto/rand" +) + +var ( + ErrEd25519Verification = errors.New("ed25519: verification error") +) + +// SigningMethodEd25519 implements the EdDSA family. +// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification +type SigningMethodEd25519 struct{} + +// Specific instance for EdDSA +var ( + SigningMethodEdDSA *SigningMethodEd25519 +) + +func init() { + SigningMethodEdDSA = &SigningMethodEd25519{} + RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod { + return SigningMethodEdDSA + }) +} + +func (m *SigningMethodEd25519) Alg() string { + return "EdDSA" +} + +// Verify implements token verification for the SigningMethod. 
+// For this verify method, key must be an ed25519.PublicKey +func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error { + var err error + var ed25519Key ed25519.PublicKey + var ok bool + + if ed25519Key, ok = key.(ed25519.PublicKey); !ok { + return ErrInvalidKeyType + } + + if len(ed25519Key) != ed25519.PublicKeySize { + return ErrInvalidKey + } + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Verify the signature + if !ed25519.Verify(ed25519Key, []byte(signingString), sig) { + return ErrEd25519Verification + } + + return nil +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an ed25519.PrivateKey +func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) { + var ed25519Key crypto.Signer + var ok bool + + if ed25519Key, ok = key.(crypto.Signer); !ok { + return "", ErrInvalidKeyType + } + + if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok { + return "", ErrInvalidKey + } + + // Sign the string and return the encoded result + // ed25519 performs a two-pass hash as part of its algorithm. Therefore, we need to pass a non-prehashed message into the Sign function, as indicated by crypto.Hash(0) + sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0)) + if err != nil { + return "", err + } + return EncodeSegment(sig), nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go new file mode 100644 index 00000000..cdb5e68e --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go @@ -0,0 +1,64 @@ +package jwt + +import ( + "crypto" + "crypto/ed25519" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key") + ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key") +) + +// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key +func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PrivateKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok { + return nil, ErrNotEdPrivateKey + } + + return pkey, nil +} + +// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key +func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PublicKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PublicKey); !ok { + return nil, ErrNotEdPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/errors.go b/vendor/github.com/golang-jwt/jwt/v4/errors.go new file mode 100644 index 00000000..10ac8835 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/errors.go @@ -0,0 +1,112 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + 
ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") + + ErrTokenMalformed = errors.New("token is malformed") + ErrTokenUnverifiable = errors.New("token is unverifiable") + ErrTokenSignatureInvalid = errors.New("token signature is invalid") + + ErrTokenInvalidAudience = errors.New("token has invalid audience") + ErrTokenExpired = errors.New("token is expired") + ErrTokenUsedBeforeIssued = errors.New("token used before issued") + ErrTokenInvalidIssuer = errors.New("token has invalid issuer") + ErrTokenNotValidYet = errors.New("token is not valid yet") + ErrTokenInvalidId = errors.New("token has invalid id") + ErrTokenInvalidClaims = errors.New("token has invalid claims") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// NewValidationError is a helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// ValidationError represents an error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... constants + text string // errors that do not have a valid error just have text +} + +// Error is the implementation of the error interface. +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// Unwrap gives errors.Is and errors.As access to the inner error. +func (e *ValidationError) Unwrap() error { + return e.Inner +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} + +// Is checks if this ValidationError is of the supplied error. We are first checking for the exact error message +// by comparing the inner error message. If that fails, we compare using the error flags. This way we can use +// custom error messages (mainly for backwards compatibility) and still leverage errors.Is using the global error variables.
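+//
+// Illustrative sketch (tokenString and keyFunc are hypothetical stand-ins):
+//
+//	_, err := jwt.Parse(tokenString, keyFunc)
+//	if errors.Is(err, jwt.ErrTokenExpired) {
+//		// the exp claim failed validation
+//	}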
+func (e *ValidationError) Is(err error) bool { + // Check, if our inner error is a direct match + if errors.Is(errors.Unwrap(e), err) { + return true + } + + // Otherwise, we need to match using our error flags + switch err { + case ErrTokenMalformed: + return e.Errors&ValidationErrorMalformed != 0 + case ErrTokenUnverifiable: + return e.Errors&ValidationErrorUnverifiable != 0 + case ErrTokenSignatureInvalid: + return e.Errors&ValidationErrorSignatureInvalid != 0 + case ErrTokenInvalidAudience: + return e.Errors&ValidationErrorAudience != 0 + case ErrTokenExpired: + return e.Errors&ValidationErrorExpired != 0 + case ErrTokenUsedBeforeIssued: + return e.Errors&ValidationErrorIssuedAt != 0 + case ErrTokenInvalidIssuer: + return e.Errors&ValidationErrorIssuer != 0 + case ErrTokenNotValidYet: + return e.Errors&ValidationErrorNotValidYet != 0 + case ErrTokenInvalidId: + return e.Errors&ValidationErrorId != 0 + case ErrTokenInvalidClaims: + return e.Errors&ValidationErrorClaimsInvalid != 0 + } + + return false +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/hmac.go b/vendor/github.com/golang-jwt/jwt/v4/hmac.go new file mode 100644 index 00000000..011f68a2 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// SigningMethodHMAC implements the HMAC-SHA family of signing methods. +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify implements token verification for the SigningMethod. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Sign implements token signing for the SigningMethod. 
+ +// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go new file mode 100644 index 00000000..2700d64a --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go @@ -0,0 +1,151 @@ +package jwt + +import ( + "encoding/json" + "errors" + "time" + // "fmt" +) + +// MapClaims is a claims type that uses map[string]interface{} for JSON decoding. +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + var aud []string + switch v := m["aud"].(type) { + case string: + aud = append(aud, v) + case []string: + aud = v + case []interface{}: + for _, a := range v { + vs, ok := a.(string) + if !ok { + return false + } + aud = append(aud, vs) + } + } + return verifyAud(aud, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). +// If req is false, it will return true, if exp is unset. +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["exp"] + if !ok { + return !req + } + + switch exp := v.(type) { + case float64: + if exp == 0 { + return verifyExp(nil, cmpTime, req) + } + + return verifyExp(&newNumericDateFromSeconds(exp).Time, cmpTime, req) + case json.Number: + v, _ := exp.Float64() + + return verifyExp(&newNumericDateFromSeconds(v).Time, cmpTime, req) + } + + return false +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["iat"] + if !ok { + return !req + } + + switch iat := v.(type) { + case float64: + if iat == 0 { + return verifyIat(nil, cmpTime, req) + } + + return verifyIat(&newNumericDateFromSeconds(iat).Time, cmpTime, req) + case json.Number: + v, _ := iat.Float64() + + return verifyIat(&newNumericDateFromSeconds(v).Time, cmpTime, req) + } + + return false +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["nbf"] + if !ok { + return !req + } + + switch nbf := v.(type) { + case float64: + if nbf == 0 { + return verifyNbf(nil, cmpTime, req) + } + + return verifyNbf(&newNumericDateFromSeconds(nbf).Time, cmpTime, req) + case json.Number: + v, _ := nbf.Float64() + + return verifyNbf(&newNumericDateFromSeconds(v).Time, cmpTime, req) + } + + return false +} + +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Valid validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if !m.VerifyExpiresAt(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenExpired + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if !m.VerifyIssuedAt(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenUsedBeforeIssued + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !m.VerifyNotBefore(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenNotValidYet + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/none.go b/vendor/github.com/golang-jwt/jwt/v4/none.go new file mode 100644 index 00000000..f19835d2 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/none.go @@ -0,0 +1,52 @@ +package jwt + +// SigningMethodNone implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go new file mode 100644 index 00000000..c0a6f692 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -0,0 +1,177 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + // If populated, only these methods will be considered valid. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + ValidMethods []string + + // Use JSON Number format in JSON decoder. 
+ // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + UseJSONNumber bool + + // Skip claims validation during token parsing. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + SkipClaimsValidation bool +} + +// NewParser creates a new Parser with the specified options +func NewParser(options ...ParserOption) *Parser { + p := &Parser{} + + // loop through our parsing options and apply them + for _, option := range options { + option(p) + } + + return p +} + +// Parse parses, validates, verifies the signature and returns the parsed token. +// keyFunc will receive the parsed token and should return the key for validating. +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims +// interface. This provides default values which can be overridden and allows a caller to use their own type, rather +// than the default MapClaims implementation of Claims. +// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} + +// ParseUnverified parses the token but doesn't validate the signature. 
+ +// +// WARNING: Don't use this method unless you know what you're doing. +// +// It's only ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from it. +func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser_option.go b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go new file mode 100644 index 00000000..6ea6f952 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go @@ -0,0 +1,29 @@ +package jwt + +// ParserOption is used to implement functional-style options that modify the behavior of the parser. To add +// new options, just create a function (ideally beginning with With or Without) that returns an anonymous function that +// takes a *Parser type as input and manipulates its configuration accordingly. +type ParserOption func(*Parser) + +// WithValidMethods is an option to supply algorithm methods that the parser will check. Only those methods will be considered valid. +// It is heavily encouraged to use this option in order to prevent attacks such as https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/. +func WithValidMethods(methods []string) ParserOption { + return func(p *Parser) { + p.ValidMethods = methods + } +} + +// WithJSONNumber is an option to configure the underlying JSON parser with UseNumber +func WithJSONNumber() ParserOption { + return func(p *Parser) { + p.UseJSONNumber = true + } +} + +// WithoutClaimsValidation is an option to disable claims validation. This option should only be used if you know exactly +// what you are doing.
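+//
+// Illustrative sketch of composing parser options (the token string and the
+// hmacSecret []byte key are hypothetical):
+//
+//	p := jwt.NewParser(
+//		jwt.WithValidMethods([]string{"HS256"}),
+//		jwt.WithJSONNumber(),
+//	)
+//	tok, err := p.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
+//		return hmacSecret, nil // HS256 expects a []byte key
+//	})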
+ +func WithoutClaimsValidation() ParserOption { + return func(p *Parser) { + p.SkipClaimsValidation = true + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa.go b/vendor/github.com/golang-jwt/jwt/v4/rsa.go new file mode 100644 index 00000000..b910b19c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// SigningMethodRSA implements the RSA family of signing methods. +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Verify implements token verification for the SigningMethod. +// For this signing method, the key must be an *rsa.PublicKey structure. +func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, the key must be an *rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go new file mode 100644 index 00000000..4fd6f9e6 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go @@ -0,0 +1,143 @@ +//go:build go1.4 +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// SigningMethodRSAPSS implements the RSAPSS family of signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions + // VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS.
+ // Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow +// https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously. +// See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details. + VerifyOptions *rsa.PSSOptions +} + +// Specific instances for RS/PS and company. +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Verify implements token verification for the SigningMethod. +// For this verify method, key must be an *rsa.PublicKey +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + opts := m.Options + if m.VerifyOptions != nil { + opts = m.VerifyOptions + } + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) +} + +// Sign implements token signing for the SigningMethod.
+ +// For this signing method, key must be an *rsa.PrivateKey +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go new file mode 100644 index 00000000..1966c450 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go @@ -0,0 +1,105 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key") + ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key") +) + +// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password +// +// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock +// function, which was deprecated because RFC 1423 is regarded as insecure by design. Unfortunately, there is no alternative +// in the Go standard library for now. See https://github.com/golang/go/issues/8860.
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// ParseRSAPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/signing_method.go b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go new file mode 100644 index 00000000..241ae9c6 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go @@ -0,0 +1,46 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// SigningMethod can be used to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// RegisterSigningMethod registers the "alg" name and a factory function for the signing method.
+ +// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// GetSigningMethod retrieves a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} + +// GetAlgorithms returns a list of registered "alg" names +func GetAlgorithms() (algs []string) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + for alg := range signingMethods { + algs = append(algs, alg) + } + return +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf new file mode 100644 index 00000000..53745d51 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"] diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go new file mode 100644 index 00000000..786b275c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/token.go @@ -0,0 +1,143 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// DecodePaddingAllowed will switch the codec used for decoding JWTs to one that accepts padding. Note that the JWS RFC7515 +// states that the tokens will utilize a Base64url encoding with no padding. Unfortunately, some implementations +// of JWT are producing non-standard tokens, and thus require support for decoding. Note that this is a global +// variable, and updating it will change the behavior on a package level, and is also NOT goroutine safe. +// To use the non-recommended decoding, set this boolean to `true` prior to using this package. +var DecodePaddingAllowed bool + +// DecodeStrict will switch the codec used for decoding JWTs into strict mode. +// In this mode, the decoder requires that trailing padding bits are zero, as described in RFC 4648 section 3.5. +// Note that this is a global variable, and updating it will change the behavior on a package level, and is also NOT goroutine safe. +// To use strict decoding, set this boolean to `true` prior to using this package. +var DecodeStrict bool + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Keyfunc will be used by the Parse methods as a callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// Token represents a JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid?
Populated when you Parse/Verify a token +} + +// New creates a new Token with the specified signing method and an empty map of claims. +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +// NewWithClaims creates a new Token with the specified signing method and claims. +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// SignedString creates and returns a complete, signed JWT. +// The token is signed using the SigningMethod specified in the token. +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// SigningString generates the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + var jsonValue []byte + + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + header := EncodeSegment(jsonValue) + + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + claim := EncodeSegment(jsonValue) + + return strings.Join([]string{header, claim}, "."), nil +} + +// Parse parses, validates, verifies the signature and returns the parsed token. +// keyFunc will receive the parsed token and should return the cryptographic key +// for verifying the signature. +// The caller is strongly encouraged to set the WithValidMethods option to +// validate the 'alg' claim in the token matches the expected algorithm. +// For more details about the importance of validating the 'alg' claim, +// see https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/ +func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).Parse(tokenString, keyFunc) +} + +// ParseWithClaims is a shortcut for NewParser().ParseWithClaims(). +// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. 
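+//
+// A short sketch of the embedding described above (the type name, token string
+// and key function are hypothetical):
+//
+//	type MyClaims struct {
+//		Foo string `json:"foo"`
+//		jwt.RegisteredClaims // embedded as a non-pointer, per the note above
+//	}
+//
+//	tok, err := jwt.ParseWithClaims(tokenString, &MyClaims{}, keyFunc)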
+ +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc) +} + +// EncodeSegment encodes a JWT specific base64url encoding with padding stripped +// +// Deprecated: In a future release, we will demote this function to a non-exported function, since it +// should only be used internally +func EncodeSegment(seg []byte) string { + return base64.RawURLEncoding.EncodeToString(seg) +} + +// DecodeSegment decodes a JWT specific base64url encoding with padding stripped +// +// Deprecated: In a future release, we will demote this function to a non-exported function, since it +// should only be used internally +func DecodeSegment(seg string) ([]byte, error) { + encoding := base64.RawURLEncoding + + if DecodePaddingAllowed { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + encoding = base64.URLEncoding + } + + if DecodeStrict { + encoding = encoding.Strict() + } + return encoding.DecodeString(seg) +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go new file mode 100644 index 00000000..ac8e140e --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/types.go @@ -0,0 +1,145 @@ +package jwt + +import ( + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +// TimePrecision sets the precision of times and dates within this library. +// This has an influence on the precision of times when comparing expiry or +// other related time fields. Furthermore, it is also the precision of times +// when serializing. +// +// For backwards compatibility the default precision is set to seconds, so that +// no fractional timestamps are generated. +var TimePrecision = time.Second + +// MarshalSingleStringAsArray modifies the behaviour of the ClaimStrings type, especially +// its MarshalJSON function. +// +// If it is set to true (the default), it will always serialize the type as an +// array of strings, even if it just contains one element, defaulting to the behaviour +// of the underlying []string. If it is set to false, it will serialize to a single +// string, if it contains one element. Otherwise, it will serialize to an array of strings. +var MarshalSingleStringAsArray = true +
+// NumericDate represents a JSON numeric date value, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-2. +type NumericDate struct { + time.Time +} + +// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct. +// It will truncate the timestamp according to the precision specified in TimePrecision. +func NewNumericDate(t time.Time) *NumericDate { + return &NumericDate{t.Truncate(TimePrecision)} +} + +// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a +// UNIX epoch with the float fraction representing non-integer seconds. +func newNumericDateFromSeconds(f float64) *NumericDate { + round, frac := math.Modf(f) + return NewNumericDate(time.Unix(int64(round), int64(frac*1e9))) +} + +// MarshalJSON is an implementation of the json.Marshaler interface and serializes the UNIX epoch +// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
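+//
+// For instance (illustrative): with the default TimePrecision of time.Second a
+// NumericDate marshals as an integer such as 1516239022, while setting
+// jwt.TimePrecision = time.Millisecond would produce 1516239022.000.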
+ +func (date NumericDate) MarshalJSON() (b []byte, err error) { + var prec int + if TimePrecision < time.Second { + prec = int(math.Log10(float64(time.Second) / float64(TimePrecision))) + } + truncatedDate := date.Truncate(TimePrecision) + + // For very large timestamps, UnixNano would overflow an int64, but this + // function requires nanosecond level precision, so we have to use the + // following technique to get round the issue: + // 1. Take the normal unix timestamp to form the whole number part of the + // output, + // 2. Take the result of the Nanosecond function, which returns the offset + // within the second of the particular unix time instance, to form the + // decimal part of the output + // 3. Concatenate them to produce the final result + seconds := strconv.FormatInt(truncatedDate.Unix(), 10) + nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64) + + output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...) + + return output, nil +} + +// UnmarshalJSON is an implementation of the json.Unmarshaler interface and deserializes a +// NumericDate from a JSON representation, i.e. a json.Number. This number represents a UNIX epoch +// with either integer or non-integer seconds. +func (date *NumericDate) UnmarshalJSON(b []byte) (err error) { + var ( + number json.Number + f float64 + ) + + if err = json.Unmarshal(b, &number); err != nil { + return fmt.Errorf("could not parse NumericData: %w", err) + } + + if f, err = number.Float64(); err != nil { + return fmt.Errorf("could not convert json number value to float: %w", err) + } + + n := newNumericDateFromSeconds(f) + *date = *n + + return nil +} + +// ClaimStrings is basically just a slice of strings, but it can be either serialized from a string array or just a string. +// This type is necessary, since the "aud" claim can either be a single string or an array. +type ClaimStrings []string + +func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) { + var value interface{} + + if err = json.Unmarshal(data, &value); err != nil { + return err + } + + var aud []string + + switch v := value.(type) { + case string: + aud = append(aud, v) + case []string: + aud = ClaimStrings(v) + case []interface{}: + for _, vv := range v { + vs, ok := vv.(string) + if !ok { + return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)} + } + aud = append(aud, vs) + } + case nil: + return nil + default: + return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)} + } + + *s = aud + + return +} + +func (s ClaimStrings) MarshalJSON() (b []byte, err error) { + // This handles a special case in the JWT RFC. If the string array, e.g. used by the "aud" field, + // only contains one element, it MAY be serialized as a single string. This may or may not be + // desired based on the ecosystem of other JWT libraries used, so we make it configurable by the + // variable MarshalSingleStringAsArray.
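+	// For example (illustrative): ClaimStrings{"api"} marshals to ["api"] by
+	// default, and to "api" when MarshalSingleStringAsArray is set to false.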
+	if len(s) == 1 && !MarshalSingleStringAsArray {
+		return json.Marshal(s[0])
+	}
+
+	return json.Marshal([]string(s))
+}
diff --git a/vendor/github.com/google/gnostic/LICENSE b/vendor/github.com/google/gnostic-models/LICENSE
similarity index 100%
rename from vendor/github.com/google/gnostic/LICENSE
rename to vendor/github.com/google/gnostic-models/LICENSE
diff --git a/vendor/github.com/google/gnostic/compiler/README.md b/vendor/github.com/google/gnostic-models/compiler/README.md
similarity index 100%
rename from vendor/github.com/google/gnostic/compiler/README.md
rename to vendor/github.com/google/gnostic-models/compiler/README.md
diff --git a/vendor/github.com/google/gnostic/compiler/context.go b/vendor/github.com/google/gnostic-models/compiler/context.go
similarity index 100%
rename from vendor/github.com/google/gnostic/compiler/context.go
rename to vendor/github.com/google/gnostic-models/compiler/context.go
diff --git a/vendor/github.com/google/gnostic/compiler/error.go b/vendor/github.com/google/gnostic-models/compiler/error.go
similarity index 100%
rename from vendor/github.com/google/gnostic/compiler/error.go
rename to vendor/github.com/google/gnostic-models/compiler/error.go
diff --git a/vendor/github.com/google/gnostic/compiler/extensions.go b/vendor/github.com/google/gnostic-models/compiler/extensions.go
similarity index 97%
rename from vendor/github.com/google/gnostic/compiler/extensions.go
rename to vendor/github.com/google/gnostic-models/compiler/extensions.go
index 5b5a916d..250c81e8 100644
--- a/vendor/github.com/google/gnostic/compiler/extensions.go
+++ b/vendor/github.com/google/gnostic-models/compiler/extensions.go
@@ -24,7 +24,7 @@ import (
 	"github.com/golang/protobuf/ptypes/any"
 	yaml "gopkg.in/yaml.v3"
 
-	extensions "github.com/google/gnostic/extensions"
+	extensions "github.com/google/gnostic-models/extensions"
 )
 
 // ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
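
Not part of the patch itself, but a reviewer aid: a minimal sketch of how the knobs in the newly vendored golang-jwt/jwt/v4 types.go above behave. It assumes only the API visible in this diff (NewNumericDate, ClaimStrings, MarshalSingleStringAsArray, DecodeSegment, DecodePaddingAllowed); expected output is noted in the comments.

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"time"

    	jwt "github.com/golang-jwt/jwt/v4"
    )

    func main() {
    	// NewNumericDate truncates to TimePrecision (time.Second by default),
    	// so the serialized claim carries no fractional part.
    	t := time.Date(2023, 1, 2, 3, 4, 5, 123456789, time.UTC)
    	b, _ := json.Marshal(jwt.NewNumericDate(t))
    	fmt.Println(string(b)) // 1672628645

    	// A one-element ClaimStrings marshals as a JSON array by default;
    	// clearing MarshalSingleStringAsArray switches it to a bare string.
    	aud := jwt.ClaimStrings{"api"}
    	b, _ = json.Marshal(aud)
    	fmt.Println(string(b)) // ["api"]
    	jwt.MarshalSingleStringAsArray = false
    	b, _ = json.Marshal(aud)
    	fmt.Println(string(b)) // "api"

    	// DecodeSegment uses unpadded base64url by default, so padded input
    	// fails; DecodePaddingAllowed re-enables tolerant decoding.
    	if _, err := jwt.DecodeSegment("YWJjZA=="); err != nil {
    		fmt.Println("padded segment rejected:", err)
    	}
    	jwt.DecodePaddingAllowed = true
    	raw, _ := jwt.DecodeSegment("YWJjZA==")
    	fmt.Println(string(raw)) // abcd
    }
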
diff --git a/vendor/github.com/google/gnostic/compiler/helpers.go b/vendor/github.com/google/gnostic-models/compiler/helpers.go
similarity index 99%
rename from vendor/github.com/google/gnostic/compiler/helpers.go
rename to vendor/github.com/google/gnostic-models/compiler/helpers.go
index 97ffaa51..975d65e8 100644
--- a/vendor/github.com/google/gnostic/compiler/helpers.go
+++ b/vendor/github.com/google/gnostic-models/compiler/helpers.go
@@ -22,7 +22,7 @@ import (
 
 	"gopkg.in/yaml.v3"
 
-	"github.com/google/gnostic/jsonschema"
+	"github.com/google/gnostic-models/jsonschema"
 )
 
 // compiler helper functions, usually called from generated code
diff --git a/vendor/github.com/google/gnostic/compiler/main.go b/vendor/github.com/google/gnostic-models/compiler/main.go
similarity index 100%
rename from vendor/github.com/google/gnostic/compiler/main.go
rename to vendor/github.com/google/gnostic-models/compiler/main.go
diff --git a/vendor/github.com/google/gnostic/compiler/reader.go b/vendor/github.com/google/gnostic-models/compiler/reader.go
similarity index 100%
rename from vendor/github.com/google/gnostic/compiler/reader.go
rename to vendor/github.com/google/gnostic-models/compiler/reader.go
diff --git a/vendor/github.com/google/gnostic/extensions/README.md b/vendor/github.com/google/gnostic-models/extensions/README.md
similarity index 100%
rename from vendor/github.com/google/gnostic/extensions/README.md
rename to vendor/github.com/google/gnostic-models/extensions/README.md
diff --git a/vendor/github.com/google/gnostic/extensions/extension.pb.go b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go
similarity index 99%
rename from vendor/github.com/google/gnostic/extensions/extension.pb.go
rename to vendor/github.com/google/gnostic-models/extensions/extension.pb.go
index a6a4ccca..a71df8ab 100644
--- a/vendor/github.com/google/gnostic/extensions/extension.pb.go
+++ b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go
@@ -14,8 +14,8 @@
 
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.26.0
-// protoc        v3.18.1
+// protoc-gen-go v1.27.1
+// protoc        v3.19.3
 // source: extensions/extension.proto
 
 package gnostic_extension_v1
diff --git a/vendor/github.com/google/gnostic/extensions/extension.proto b/vendor/github.com/google/gnostic-models/extensions/extension.proto
similarity index 100%
rename from vendor/github.com/google/gnostic/extensions/extension.proto
rename to vendor/github.com/google/gnostic-models/extensions/extension.proto
diff --git a/vendor/github.com/google/gnostic/extensions/extensions.go b/vendor/github.com/google/gnostic-models/extensions/extensions.go
similarity index 100%
rename from vendor/github.com/google/gnostic/extensions/extensions.go
rename to vendor/github.com/google/gnostic-models/extensions/extensions.go
diff --git a/vendor/github.com/google/gnostic/jsonschema/README.md b/vendor/github.com/google/gnostic-models/jsonschema/README.md
similarity index 100%
rename from vendor/github.com/google/gnostic/jsonschema/README.md
rename to vendor/github.com/google/gnostic-models/jsonschema/README.md
diff --git a/vendor/github.com/google/gnostic/jsonschema/base.go b/vendor/github.com/google/gnostic-models/jsonschema/base.go
similarity index 90%
rename from vendor/github.com/google/gnostic/jsonschema/base.go
rename to vendor/github.com/google/gnostic-models/jsonschema/base.go
index 0af8b148..5fcc4885 100644
--- a/vendor/github.com/google/gnostic/jsonschema/base.go
+++ b/vendor/github.com/google/gnostic-models/jsonschema/base.go
@@ -1,3 +1,16 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 // THIS FILE IS AUTOMATICALLY GENERATED.
@@ -81,4 +94,4 @@ YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5
 IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg
 fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6
 IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1
-c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)}
\ No newline at end of file
+c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)}
diff --git a/vendor/github.com/google/gnostic/jsonschema/display.go b/vendor/github.com/google/gnostic-models/jsonschema/display.go
similarity index 92%
rename from vendor/github.com/google/gnostic/jsonschema/display.go
rename to vendor/github.com/google/gnostic-models/jsonschema/display.go
index 8677ed49..028a760a 100644
--- a/vendor/github.com/google/gnostic/jsonschema/display.go
+++ b/vendor/github.com/google/gnostic-models/jsonschema/display.go
@@ -46,23 +46,8 @@ func (schema *Schema) describeSchema(indent string) string {
 	if schema.Schema != nil {
 		result += indent + "$schema: " + *(schema.Schema) + "\n"
 	}
-	if schema.ReadOnly != nil && *schema.ReadOnly {
-		result += indent + fmt.Sprintf("readOnly: %+v\n", *(schema.ReadOnly))
-	}
-	if schema.WriteOnly != nil && *schema.WriteOnly {
-		result += indent + fmt.Sprintf("writeOnly: %+v\n", *(schema.WriteOnly))
-	}
 	if schema.ID != nil {
-		switch strings.TrimSuffix(*schema.Schema, "#") {
-		case "http://json-schema.org/draft-04/schema#":
-			fallthrough
-		case "#":
-			fallthrough
-		case "":
-			result += indent + "id: " + *(schema.ID) + "\n"
-		default:
-			result += indent + "$id: " + *(schema.ID) + "\n"
-		}
+		result += indent + "id: " + *(schema.ID) + "\n"
 	}
 	if schema.MultipleOf != nil {
 		result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf))
diff --git a/vendor/github.com/google/gnostic/jsonschema/models.go b/vendor/github.com/google/gnostic-models/jsonschema/models.go
similarity index 97%
rename from vendor/github.com/google/gnostic/jsonschema/models.go
rename to vendor/github.com/google/gnostic-models/jsonschema/models.go
index 0d877249..4781bdc5 100644
--- a/vendor/github.com/google/gnostic/jsonschema/models.go
+++ b/vendor/github.com/google/gnostic-models/jsonschema/models.go
@@ -23,11 +23,9 @@ import "gopkg.in/yaml.v3"
 // All fields are pointers and are nil if the associated values
 // are not specified.
 type Schema struct {
-	Schema    *string // $schema
-	ID        *string // id keyword used for $ref resolution scope
-	Ref       *string // $ref, i.e. JSON Pointers
-	ReadOnly  *bool
-	WriteOnly *bool
+	Schema *string // $schema
+	ID     *string // id keyword used for $ref resolution scope
+	Ref    *string // $ref, i.e. JSON Pointers
 
 	// http://json-schema.org/latest/json-schema-validation.html
 	// 5.1. Validation keywords for numeric instances (number and integer)
diff --git a/vendor/github.com/google/gnostic/jsonschema/operations.go b/vendor/github.com/google/gnostic-models/jsonschema/operations.go
similarity index 100%
rename from vendor/github.com/google/gnostic/jsonschema/operations.go
rename to vendor/github.com/google/gnostic-models/jsonschema/operations.go
diff --git a/vendor/github.com/google/gnostic/jsonschema/reader.go b/vendor/github.com/google/gnostic-models/jsonschema/reader.go
similarity index 99%
rename from vendor/github.com/google/gnostic/jsonschema/reader.go
rename to vendor/github.com/google/gnostic-models/jsonschema/reader.go
index a909a341..b8583d46 100644
--- a/vendor/github.com/google/gnostic/jsonschema/reader.go
+++ b/vendor/github.com/google/gnostic-models/jsonschema/reader.go
@@ -165,6 +165,7 @@ func NewSchemaFromObject(jsonData *yaml.Node) *Schema {
 
 	default:
 		fmt.Printf("schemaValue: unexpected node %+v\n", jsonData)
+		return nil
 	}
 
 	return nil
diff --git a/vendor/github.com/google/gnostic/jsonschema/schema.json b/vendor/github.com/google/gnostic-models/jsonschema/schema.json
similarity index 100%
rename from vendor/github.com/google/gnostic/jsonschema/schema.json
rename to vendor/github.com/google/gnostic-models/jsonschema/schema.json
diff --git a/vendor/github.com/google/gnostic/jsonschema/writer.go b/vendor/github.com/google/gnostic-models/jsonschema/writer.go
similarity index 92%
rename from vendor/github.com/google/gnostic/jsonschema/writer.go
rename to vendor/github.com/google/gnostic-models/jsonschema/writer.go
index 15b1f905..340dc5f9 100644
--- a/vendor/github.com/google/gnostic/jsonschema/writer.go
+++ b/vendor/github.com/google/gnostic-models/jsonschema/writer.go
@@ -16,7 +16,6 @@ package jsonschema
 
 import (
 	"fmt"
-	"strings"
 
 	"gopkg.in/yaml.v3"
 )
@@ -34,11 +33,7 @@ func renderMappingNode(node *yaml.Node, indent string) (result string) {
 		value := node.Content[i+1]
 		switch value.Kind {
 		case yaml.ScalarNode:
-			if value.Tag == "!!bool" {
-				result += value.Value
-			} else {
-				result += "\"" + value.Value + "\""
-			}
+			result += "\"" + value.Value + "\""
 		case yaml.MappingNode:
 			result += renderMappingNode(value, innerIndent)
 		case yaml.SequenceNode:
@@ -63,11 +58,7 @@ func renderSequenceNode(node *yaml.Node, indent string) (result string) {
 		item := node.Content[i]
 		switch item.Kind {
 		case yaml.ScalarNode:
-			if item.Tag == "!!bool" {
-				result += innerIndent + item.Value
-			} else {
-				result += innerIndent + "\"" + item.Value + "\""
-			}
+			result += innerIndent + "\"" + item.Value + "\""
 		case yaml.MappingNode:
 			result += innerIndent + renderMappingNode(item, innerIndent) + ""
 		default:
@@ -269,26 +260,11 @@ func (schema *Schema) nodeValue() *yaml.Node {
 		content = appendPair(content, "title", nodeForString(*schema.Title))
 	}
 	if schema.ID != nil {
-		switch strings.TrimSuffix(*schema.Schema, "#") {
-		case "http://json-schema.org/draft-04/schema":
-			fallthrough
-		case "#":
-			fallthrough
-		case "":
-			content = appendPair(content, "id", nodeForString(*schema.ID))
-		default:
-			content = appendPair(content, "$id", nodeForString(*schema.ID))
-		}
+		content = appendPair(content, "id", nodeForString(*schema.ID))
 	}
 	if schema.Schema != nil {
 		content = appendPair(content, "$schema", nodeForString(*schema.Schema))
 	}
-	if schema.ReadOnly != nil && *schema.ReadOnly {
-		content = appendPair(content, "readOnly", nodeForBoolean(*schema.ReadOnly))
-	}
-	if schema.WriteOnly != nil && *schema.WriteOnly {
-		content = appendPair(content, "writeOnly", nodeForBoolean(*schema.WriteOnly))
-	}
 	if schema.Type != nil {
 		content = appendPair(content, "type", schema.Type.nodeValue())
 	}
diff --git a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go
similarity index 99%
rename from vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go
rename to vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go
index 28c2777d..d71fe6d5 100644
--- a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go
+++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go
@@ -23,7 +23,7 @@ import (
 
 	"gopkg.in/yaml.v3"
 
-	"github.com/google/gnostic/compiler"
+	"github.com/google/gnostic-models/compiler"
 )
 
 // Version returns the package name (and OpenAPI version).
@@ -7887,12 +7887,7 @@ func (m *Oauth2Scopes) ToRawInfo() *yaml.Node {
 	if m == nil {
 		return info
 	}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value))
-		}
-	}
+	// &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:}
 	return info
 }
diff --git a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
similarity index 99%
rename from vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go
rename to vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
index 06b60157..65c4c913 100644
--- a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go
+++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
@@ -16,8 +16,8 @@
 
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.26.0
-// protoc        v3.18.1
+// protoc-gen-go v1.27.1
+// protoc        v3.19.3
 // source: openapiv2/OpenAPIv2.proto
 
 package openapi_v2
diff --git a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.proto b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto
similarity index 100%
rename from vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.proto
rename to vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto
diff --git a/vendor/github.com/google/gnostic/openapiv2/README.md b/vendor/github.com/google/gnostic-models/openapiv2/README.md
similarity index 100%
rename from vendor/github.com/google/gnostic/openapiv2/README.md
rename to vendor/github.com/google/gnostic-models/openapiv2/README.md
diff --git a/vendor/github.com/google/gnostic/openapiv2/document.go b/vendor/github.com/google/gnostic-models/openapiv2/document.go
similarity index 96%
rename from vendor/github.com/google/gnostic/openapiv2/document.go
rename to vendor/github.com/google/gnostic-models/openapiv2/document.go
index 0021ae87..e96ac0d6 100644
--- a/vendor/github.com/google/gnostic/openapiv2/document.go
+++ b/vendor/github.com/google/gnostic-models/openapiv2/document.go
@@ -17,7 +17,7 @@ package openapi_v2
 import (
 	"gopkg.in/yaml.v3"
 
-	"github.com/google/gnostic/compiler"
+	"github.com/google/gnostic-models/compiler"
 )
 
 // ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation.
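
Again as a reviewer aid rather than part of the patch: a consumer-side sketch of what the gnostic → gnostic-models rename means for import paths, using the ParseDocument entry point documented in the hunk above. The swagger.yaml input path is hypothetical.

    package main

    import (
    	"fmt"
    	"os"

    	// After the rename, consumers import github.com/google/gnostic-models/...
    	// instead of github.com/google/gnostic/...; the package API is unchanged.
    	openapi_v2 "github.com/google/gnostic-models/openapiv2"
    )

    func main() {
    	data, err := os.ReadFile("swagger.yaml") // hypothetical OpenAPI v2 description
    	if err != nil {
    		panic(err)
    	}

    	// ParseDocument accepts either YAML or JSON, per its doc comment.
    	doc, err := openapi_v2.ParseDocument(data)
    	if err != nil {
    		panic(err)
    	}

    	// The parsed document is exposed as typed protobuf messages.
    	fmt.Println(doc.Swagger, doc.Info.Title, doc.Info.Version)
    }
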
diff --git a/vendor/github.com/google/gnostic/openapiv2/openapi-2.0.json b/vendor/github.com/google/gnostic-models/openapiv2/openapi-2.0.json similarity index 100% rename from vendor/github.com/google/gnostic/openapiv2/openapi-2.0.json rename to vendor/github.com/google/gnostic-models/openapiv2/openapi-2.0.json diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go similarity index 99% rename from vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go rename to vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go index d54a84db..4b1131ce 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go @@ -23,7 +23,7 @@ import ( "gopkg.in/yaml.v3" - "github.com/google/gnostic/compiler" + "github.com/google/gnostic-models/compiler" ) // Version returns the package name (and OpenAPI version). @@ -8560,12 +8560,7 @@ func (m *Strings) ToRawInfo() *yaml.Node { if m == nil { return info } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value)) - } - } + // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} return info } diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go similarity index 99% rename from vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go rename to vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go index 90a56f55..945b8d11 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.27.1 +// protoc v3.19.3 // source: openapiv3/OpenAPIv3.proto package openapi_v3 @@ -6760,13 +6760,12 @@ var file_openapiv3_OpenAPIv3_proto_rawDesc = []byte{ 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x56, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, - 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto similarity index 99% rename from vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto rename to vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto index 7aede5ed..1be335b8 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto @@ -42,7 +42,7 @@ option java_package = "org.openapi_v3"; option objc_class_prefix = "OAS"; // The Go package name. -option go_package = "github.com/google/gnostic/openapiv3;openapi_v3"; +option go_package = "./openapiv3;openapi_v3"; message AdditionalPropertiesItem { oneof oneof { diff --git a/vendor/github.com/google/gnostic/openapiv3/README.md b/vendor/github.com/google/gnostic-models/openapiv3/README.md similarity index 89% rename from vendor/github.com/google/gnostic/openapiv3/README.md rename to vendor/github.com/google/gnostic-models/openapiv3/README.md index 83603b82..5ee12d92 100644 --- a/vendor/github.com/google/gnostic/openapiv3/README.md +++ b/vendor/github.com/google/gnostic-models/openapiv3/README.md @@ -19,7 +19,3 @@ for OpenAPI. The schema-generator directory contains support code which generates openapi-3.1.json from the OpenAPI 3.1 specification document (Markdown). - -### How to rebuild - -`protoc -I=. -I=third_party --go_out=. 
--go_opt=paths=source_relative openapiv3/*.proto` \ No newline at end of file diff --git a/vendor/github.com/google/gnostic/openapiv3/document.go b/vendor/github.com/google/gnostic-models/openapiv3/document.go similarity index 96% rename from vendor/github.com/google/gnostic/openapiv3/document.go rename to vendor/github.com/google/gnostic-models/openapiv3/document.go index ef10d1d9..1cee4677 100644 --- a/vendor/github.com/google/gnostic/openapiv3/document.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/document.go @@ -17,7 +17,7 @@ package openapi_v3 import ( "gopkg.in/yaml.v3" - "github.com/google/gnostic/compiler" + "github.com/google/gnostic-models/compiler" ) // ParseDocument reads an OpenAPI v3 description from a YAML/JSON representation. diff --git a/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go b/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go deleted file mode 100644 index ae242f30..00000000 --- a/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2022 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: openapiv3/annotations.proto - -package openapi_v3 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" - reflect "reflect" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.FileOptions)(nil), - ExtensionType: (*Document)(nil), - Field: 1143, - Name: "openapi.v3.document", - Tag: "bytes,1143,opt,name=document", - Filename: "openapiv3/annotations.proto", - }, - { - ExtendedType: (*descriptorpb.MethodOptions)(nil), - ExtensionType: (*Operation)(nil), - Field: 1143, - Name: "openapi.v3.operation", - Tag: "bytes,1143,opt,name=operation", - Filename: "openapiv3/annotations.proto", - }, - { - ExtendedType: (*descriptorpb.MessageOptions)(nil), - ExtensionType: (*Schema)(nil), - Field: 1143, - Name: "openapi.v3.schema", - Tag: "bytes,1143,opt,name=schema", - Filename: "openapiv3/annotations.proto", - }, - { - ExtendedType: (*descriptorpb.FieldOptions)(nil), - ExtensionType: (*Schema)(nil), - Field: 1143, - Name: "openapi.v3.property", - Tag: "bytes,1143,opt,name=property", - Filename: "openapiv3/annotations.proto", - }, -} - -// Extension fields to descriptorpb.FileOptions. -var ( - // optional openapi.v3.Document document = 1143; - E_Document = &file_openapiv3_annotations_proto_extTypes[0] -) - -// Extension fields to descriptorpb.MethodOptions. 
-var ( - // optional openapi.v3.Operation operation = 1143; - E_Operation = &file_openapiv3_annotations_proto_extTypes[1] -) - -// Extension fields to descriptorpb.MessageOptions. -var ( - // optional openapi.v3.Schema schema = 1143; - E_Schema = &file_openapiv3_annotations_proto_extTypes[2] -) - -// Extension fields to descriptorpb.FieldOptions. -var ( - // optional openapi.v3.Schema property = 1143; - E_Property = &file_openapiv3_annotations_proto_extTypes[3] -) - -var File_openapiv3_annotations_proto protoreflect.FileDescriptor - -var file_openapiv3_annotations_proto_rawDesc = []byte{ - 0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64, - 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a, - 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x5a, 0x0a, 0x0e, 0x6f, - 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 
0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, - 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_openapiv3_annotations_proto_goTypes = []interface{}{ - (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions - (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions - (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions - (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions - (*Document)(nil), // 4: openapi.v3.Document - (*Operation)(nil), // 5: openapi.v3.Operation - (*Schema)(nil), // 6: openapi.v3.Schema -} -var file_openapiv3_annotations_proto_depIdxs = []int32{ - 0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions - 1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions - 2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions - 3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions - 4, // 4: openapi.v3.document:type_name -> openapi.v3.Document - 5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation - 6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema - 6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema - 8, // [8:8] is the sub-list for method output_type - 8, // [8:8] is the sub-list for method input_type - 4, // [4:8] is the sub-list for extension type_name - 0, // [0:4] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_openapiv3_annotations_proto_init() } -func file_openapiv3_annotations_proto_init() { - if File_openapiv3_annotations_proto != nil { - return - } - file_openapiv3_OpenAPIv3_proto_init() - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_openapiv3_annotations_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 4, - NumServices: 0, - }, - GoTypes: file_openapiv3_annotations_proto_goTypes, - DependencyIndexes: file_openapiv3_annotations_proto_depIdxs, - ExtensionInfos: file_openapiv3_annotations_proto_extTypes, - }.Build() - File_openapiv3_annotations_proto = out.File - file_openapiv3_annotations_proto_rawDesc = nil - file_openapiv3_annotations_proto_goTypes = nil - file_openapiv3_annotations_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/gnostic/openapiv3/annotations.proto b/vendor/github.com/google/gnostic/openapiv3/annotations.proto deleted file mode 100644 index 0bd87810..00000000 --- a/vendor/github.com/google/gnostic/openapiv3/annotations.proto +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2022 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package openapi.v3; - -import "openapiv3/OpenAPIv3.proto"; -import "google/protobuf/descriptor.proto"; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "AnnotationsProto"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.openapi_v3"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -option objc_class_prefix = "OAS"; - -// The Go package name. -option go_package = "github.com/google/gnostic/openapiv3;openapi_v3"; - -extend google.protobuf.FileOptions { - Document document = 1143; -} - -extend google.protobuf.MethodOptions { - Operation operation = 1143; -} - -extend google.protobuf.MessageOptions { - Schema schema = 1143; -} - -extend google.protobuf.FieldOptions { - Schema property = 1143; -} \ No newline at end of file diff --git a/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json b/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json deleted file mode 100644 index d5caed16..00000000 --- a/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json +++ /dev/null @@ -1,1251 +0,0 @@ -{ - "title": "A JSON Schema for OpenAPI 3.0.", - "id": "http://openapis.org/v3/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "description": "This is the root document object of the OpenAPI document.", - "required": [ - "openapi", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "openapi": { - "type": "string" - }, - "info": { - "$ref": "#/definitions/info" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "components": { - "$ref": "#/definitions/components" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.", - "required": [ - "title", - "version" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "termsOfService": { - "type": "string" - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - }, - "version": { - "type": "string" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the exposed API.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - }, - "email": { - "type": "string", - "format": "email" - } - } - }, - "license": { - "type": "object", - "description": "License information for the exposed API.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "server": { - "type": "object", - "description": "An object representing a Server.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "url": { - "type": "string" - }, - "description": { - "type": "string" - }, - "variables": { - "$ref": "#/definitions/serverVariables" - } - } - }, - "serverVariable": { - "type": "object", - "description": "An object representing a Server Variable for server URL template substitution.", - "required": [ - "default" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "enum": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "default": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "components": { - "type": "object", - "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schemas": { - "$ref": "#/definitions/schemasOrReferences" - }, - "responses": { - "$ref": "#/definitions/responsesOrReferences" - }, - "parameters": { - "$ref": "#/definitions/parametersOrReferences" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "requestBodies": { - "$ref": "#/definitions/requestBodiesOrReferences" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "securitySchemes": { - "$ref": "#/definitions/securitySchemesOrReferences" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - } - } - }, - "paths": { - "type": "object", - "description": "Holds the relative paths to the individual endpoints and their operations. 
The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.", - "additionalProperties": false, - "patternProperties": { - "^/": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "pathItem": { - "type": "object", - "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "trace": { - "$ref": "#/definitions/operation" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - } - } - }, - "operation": { - "type": "object", - "description": "Describes a single API operation on a path.", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - }, - "requestBody": { - "$ref": "#/definitions/requestBodyOrReference" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - }, - "deprecated": { - "type": "boolean" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - } - } - }, - "externalDocs": { - "type": "object", - "description": "Allows referencing an external resource for extended documentation.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "parameter": { - "type": "object", - "description": "Describes a single operation parameter. 
A unique parameter is defined by a combination of a name and location.", - "required": [ - "name", - "in" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "requestBody": { - "type": "object", - "description": "Describes a single request body.", - "required": [ - "content" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "required": { - "type": "boolean" - } - } - }, - "mediaType": { - "type": "object", - "description": "Each Media Type Object provides schema and examples for the media type identified by its key.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "encoding": { - "$ref": "#/definitions/encodings" - } - } - }, - "encoding": { - "type": "object", - "description": "A single encoding definition applied to a single schema property.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "contentType": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - } - } - }, - "responses": { - "type": "object", - "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.", - "additionalProperties": false, - "patternProperties": { - "^([0-9X]{3})$": { - "$ref": "#/definitions/responseOrReference" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "default": { - "$ref": "#/definitions/responseOrReference" - } - } - }, - "response": { - "type": "object", - "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.", - "required": [ - "description" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - } - } - }, - "callback": { - "type": "object", - "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.", - "additionalProperties": false, - "patternProperties": { - "^": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "example": { - "type": "object", - "description": "", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/any" - }, - "externalValue": { - "type": "string" - } - } - }, - "link": { - "type": "object", - "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "operationRef": { - "type": "string" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "$ref": "#/definitions/anysOrExpressions" - }, - "requestBody": { - "$ref": "#/definitions/anyOrExpression" - }, - "description": { - "type": "string" - }, - "server": { - "$ref": "#/definitions/server" - } - } - }, - "header": { - "type": "object", - "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. 
All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "tag": { - "type": "object", - "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - } - }, - "reference": { - "type": "object", - "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "schema": { - "type": "object", - "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. 
Unless stated otherwise, the property definitions follow the JSON Schema.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "nullable": { - "type": "boolean" - }, - "discriminator": { - "$ref": "#/definitions/discriminator" - }, - "readOnly": { - "type": "boolean" - }, - "writeOnly": { - "type": "boolean" - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": { - "$ref": "#/definitions/any" - }, - "deprecated": { - "type": "boolean" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/required" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "type": { - "type": "string" - }, - "allOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "oneOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "anyOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "not": { - "$ref": "#/definitions/schema" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - } - ] - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "boolean" - } - ] - }, - "default": { - "$ref": "#/definitions/defaultType" - }, - "description": { - "type": "string" - }, - "format": { - "type": "string" - } - } - }, - "discriminator": { - "type": "object", - "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. 
The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.", - "required": [ - "propertyName" - ], - "additionalProperties": false, - "properties": { - "propertyName": { - "type": "string" - }, - "mapping": { - "$ref": "#/definitions/strings" - } - } - }, - "xml": { - "type": "object", - "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean" - }, - "wrapped": { - "type": "boolean" - } - } - }, - "securityScheme": { - "type": "object", - "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header or as a query parameter), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect Discovery.", - "required": [ - "type" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "type": { - "type": "string" - }, - "description": { - "type": "string" - }, - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "scheme": { - "type": "string" - }, - "bearerFormat": { - "type": "string" - }, - "flows": { - "$ref": "#/definitions/oauthFlows" - }, - "openIdConnectUrl": { - "type": "string" - } - } - }, - "oauthFlows": { - "type": "object", - "description": "Allows configuration of the supported OAuth Flows.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "implicit": { - "$ref": "#/definitions/oauthFlow" - }, - "password": { - "$ref": "#/definitions/oauthFlow" - }, - "clientCredentials": { - "$ref": "#/definitions/oauthFlow" - }, - "authorizationCode": { - "$ref": "#/definitions/oauthFlow" - } - } - }, - "oauthFlow": { - "type": "object", - "description": "Configuration details for a supported OAuth Flow", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "authorizationUrl": { - "type": "string" - }, - "tokenUrl": { - "type": "string" - }, - "refreshUrl": { - "type": "string" - }, - "scopes": { - "$ref": "#/definitions/strings" - } - } - }, - "securityRequirement": { - "type": "object", - "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. 
When a list of Security Requirement Objects is defined on the Open API object or Operation Object, only one of Security Requirement Objects in the list needs to be satisfied to authorize the request.", - "additionalProperties": false, - "patternProperties": { - "^[a-zA-Z0-9\\.\\-_]+$": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - } - }, - "anyOrExpression": { - "oneOf": [ - { - "$ref": "#/definitions/any" - }, - { - "$ref": "#/definitions/expression" - } - ] - }, - "callbackOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/callback" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "exampleOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/example" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "headerOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/header" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "linkOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/link" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "parameterOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "requestBodyOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/requestBody" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "responseOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "schemaOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "securitySchemeOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/securityScheme" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "anysOrExpressions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/anyOrExpression" - } - }, - "callbacksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/callbackOrReference" - } - }, - "encodings": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/encoding" - } - }, - "examplesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/exampleOrReference" - } - }, - "headersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/headerOrReference" - } - }, - "linksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/linkOrReference" - } - }, - "mediaTypes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/mediaType" - } - }, - "parametersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameterOrReference" - } - }, - "requestBodiesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/requestBodyOrReference" - } - }, - "responsesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/responseOrReference" - } - }, - "schemasOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "securitySchemesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/securitySchemeOrReference" - } - }, - "serverVariables": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/serverVariable" - } - }, - "strings": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "object": { - "type": "object", - "additionalProperties": 
true - }, - "any": { - "additionalProperties": true - }, - "expression": { - "type": "object", - "additionalProperties": true - }, - "specificationExtension": { - "description": "Any property starting with x- is valid.", - "oneOf": [ - { - "type": "null" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - }, - { - "type": "object" - }, - { - "type": "array" - } - ] - }, - "defaultType": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "array" - }, - { - "type": "object" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - } - ] - } - } -} diff --git a/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json b/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json deleted file mode 100644 index ed0b83ad..00000000 --- a/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json +++ /dev/null @@ -1,1250 +0,0 @@ -{ - "title": "A JSON Schema for OpenAPI 3.0.", - "id": "http://openapis.org/v3/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "description": "This is the root document object of the OpenAPI document.", - "required": [ - "openapi", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "openapi": { - "type": "string" - }, - "info": { - "$ref": "#/definitions/info" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "components": { - "$ref": "#/definitions/components" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.", - "required": [ - "title", - "version" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "termsOfService": { - "type": "string" - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - }, - "version": { - "type": "string" - }, - "summary": { - "type": "string" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the exposed API.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - }, - "email": { - "type": "string", - "format": "email" - } - } - }, - "license": { - "type": "object", - "description": "License information for the exposed API.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "server": { - "type": "object", - "description": "An object representing a Server.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "url": { - "type": "string" - }, - "description": { - "type": "string" - }, - "variables": { - "$ref": "#/definitions/serverVariables" - } - } - }, - "serverVariable": { - "type": "object", - "description": "An object representing a Server Variable for server URL template substitution.", - "required": [ - "default" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "enum": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "default": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "components": { - "type": "object", - "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schemas": { - "$ref": "#/definitions/schemasOrReferences" - }, - "responses": { - "$ref": "#/definitions/responsesOrReferences" - }, - "parameters": { - "$ref": "#/definitions/parametersOrReferences" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "requestBodies": { - "$ref": "#/definitions/requestBodiesOrReferences" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "securitySchemes": { - "$ref": "#/definitions/securitySchemesOrReferences" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - } - } - }, - "paths": { - "type": "object", - "description": "Holds the relative paths to the individual endpoints and their operations. 
The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.", - "additionalProperties": false, - "patternProperties": { - "^/": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "pathItem": { - "type": "object", - "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "trace": { - "$ref": "#/definitions/operation" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - } - } - }, - "operation": { - "type": "object", - "description": "Describes a single API operation on a path.", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - }, - "requestBody": { - "$ref": "#/definitions/requestBodyOrReference" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - }, - "deprecated": { - "type": "boolean" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - } - } - }, - "externalDocs": { - "type": "object", - "description": "Allows referencing an external resource for extended documentation.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "parameter": { - "type": "object", - "description": "Describes a single operation parameter. 
A unique parameter is defined by a combination of a name and location.", - "required": [ - "name", - "in" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "requestBody": { - "type": "object", - "description": "Describes a single request body.", - "required": [ - "content" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "required": { - "type": "boolean" - } - } - }, - "mediaType": { - "type": "object", - "description": "Each Media Type Object provides schema and examples for the media type identified by its key.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "encoding": { - "$ref": "#/definitions/encodings" - } - } - }, - "encoding": { - "type": "object", - "description": "A single encoding definition applied to a single schema property.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "contentType": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - } - } - }, - "responses": { - "type": "object", - "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.", - "additionalProperties": false, - "patternProperties": { - "^([0-9X]{3})$": { - "$ref": "#/definitions/responseOrReference" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "default": { - "$ref": "#/definitions/responseOrReference" - } - } - }, - "response": { - "type": "object", - "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.", - "required": [ - "description" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - } - } - }, - "callback": { - "type": "object", - "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.", - "additionalProperties": false, - "patternProperties": { - "^": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "example": { - "type": "object", - "description": "", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/any" - }, - "externalValue": { - "type": "string" - } - } - }, - "link": { - "type": "object", - "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "operationRef": { - "type": "string" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "$ref": "#/definitions/anyOrExpression" - }, - "requestBody": { - "$ref": "#/definitions/anyOrExpression" - }, - "description": { - "type": "string" - }, - "server": { - "$ref": "#/definitions/server" - } - } - }, - "header": { - "type": "object", - "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. 
All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "tag": { - "type": "object", - "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - } - }, - "reference": { - "type": "object", - "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "schema": { - "type": "object", - "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. 
Unless stated otherwise, the property definitions follow the JSON Schema.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "nullable": { - "type": "boolean" - }, - "discriminator": { - "$ref": "#/definitions/discriminator" - }, - "readOnly": { - "type": "boolean" - }, - "writeOnly": { - "type": "boolean" - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": { - "$ref": "#/definitions/any" - }, - "deprecated": { - "type": "boolean" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/required" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "type": { - "type": "string" - }, - "allOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "oneOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "anyOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "not": { - "$ref": "#/definitions/schema" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - } - ] - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "boolean" - } - ] - }, - "default": { - "$ref": "#/definitions/defaultType" - }, - "description": { - "type": "string" - }, - "format": { - "type": "string" - } - } - }, - "discriminator": { - "type": "object", - "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. 
The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.", - "required": [ - "propertyName" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "propertyName": { - "type": "string" - }, - "mapping": { - "$ref": "#/definitions/strings" - } - } - }, - "xml": { - "type": "object", - "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean" - }, - "wrapped": { - "type": "boolean" - } - } - }, - "securityScheme": { - "type": "object", - "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that currently (2019) the implicit flow is about to be deprecated OAuth 2.0 Security Best Current Practice. Recommended for most use case is Authorization Code Grant flow with PKCE.", - "required": [ - "type" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "type": { - "type": "string" - }, - "description": { - "type": "string" - }, - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "scheme": { - "type": "string" - }, - "bearerFormat": { - "type": "string" - }, - "flows": { - "$ref": "#/definitions/oauthFlows" - }, - "openIdConnectUrl": { - "type": "string" - } - } - }, - "oauthFlows": { - "type": "object", - "description": "Allows configuration of the supported OAuth Flows.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "implicit": { - "$ref": "#/definitions/oauthFlow" - }, - "password": { - "$ref": "#/definitions/oauthFlow" - }, - "clientCredentials": { - "$ref": "#/definitions/oauthFlow" - }, - "authorizationCode": { - "$ref": "#/definitions/oauthFlow" - } - } - }, - "oauthFlow": { - "type": "object", - "description": "Configuration details for a supported OAuth Flow", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "authorizationUrl": { - "type": "string" - }, - "tokenUrl": { - "type": "string" - }, - "refreshUrl": { - "type": "string" - }, - "scopes": { - "$ref": "#/definitions/strings" - } - } - }, - "securityRequirement": { - "type": "object", - "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. 
Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request.", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "anyOrExpression": { - "oneOf": [ - { - "$ref": "#/definitions/any" - }, - { - "$ref": "#/definitions/expression" - } - ] - }, - "callbackOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/callback" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "exampleOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/example" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "headerOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/header" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "linkOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/link" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "parameterOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "requestBodyOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/requestBody" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "responseOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "schemaOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "securitySchemeOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/securityScheme" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "callbacksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/callbackOrReference" - } - }, - "encodings": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/encoding" - } - }, - "examplesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/exampleOrReference" - } - }, - "headersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/headerOrReference" - } - }, - "linksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/linkOrReference" - } - }, - "mediaTypes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/mediaType" - } - }, - "parametersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameterOrReference" - } - }, - "requestBodiesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/requestBodyOrReference" - } - }, - "responsesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/responseOrReference" - } - }, - "schemasOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "securitySchemesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/securitySchemeOrReference" - } - }, - "serverVariables": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/serverVariable" - } - }, - "strings": { - "type": "object", - "additionalProperties": { - "type": 
"string" - } - }, - "object": { - "type": "object", - "additionalProperties": true - }, - "any": { - "additionalProperties": true - }, - "expression": { - "type": "object", - "additionalProperties": true - }, - "specificationExtension": { - "description": "Any property starting with x- is valid.", - "oneOf": [ - { - "type": "null" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - }, - { - "type": "object" - }, - { - "type": "array" - } - ] - }, - "defaultType": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "array" - }, - { - "type": "object" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - } - ] - } - } -} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60..00000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 00000000..2bd78667 --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 04fdf09f..55668887 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -2,6 +2,22 @@ We definitely welcome patches and contribution to this project! +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. + +### Releasing + +Commits that would precipitate a SemVer change, as desrcibed in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. + ### Legal requirements In order to protect both you and ourselves, you will need to sign the diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index f765a46f..3e9a6188 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -1,6 +1,6 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +# uuid The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) and DCE 1.1: Authentication and Security Services. This package is based on the github.com/pborman/uuid package (previously named @@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this change is the ability to represent an invalid UUID (vs a NIL UUID). 
###### Install -`go get github.com/google/uuid` +```sh +go get github.com/google/uuid +``` ###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go index 24b78edc..b2a0bc87 100644 --- a/vendor/github.com/google/uuid/node_js.go +++ b/vendor/github.com/google/uuid/node_js.go @@ -7,6 +7,6 @@ package uuid // getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. +// This removes the "net" dependency, because it is not used in the browser. // Using the "net" library inflates the size of the transpiled JS code by 673k bytes. func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a57207ae..a56138cc 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -69,7 +69,7 @@ func Parse(s string) (UUID, error) { // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { + if !strings.EqualFold(s[:9], "urn:uuid:") { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] @@ -101,7 +101,8 @@ func Parse(s string) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(s[x], s[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -117,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) { switch len(b) { case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] @@ -145,7 +146,8 @@ func ParseBytes(b []byte) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(b[x], b[x+1]) if !ok { return uuid, errors.New("invalid UUID format") diff --git a/vendor/github.com/grafana/dskit/backoff/backoff.go b/vendor/github.com/grafana/dskit/backoff/backoff.go index c5d45471..7ce55647 100644 --- a/vendor/github.com/grafana/dskit/backoff/backoff.go +++ b/vendor/github.com/grafana/dskit/backoff/backoff.go @@ -77,9 +77,12 @@ func (b *Backoff) Wait() { sleepTime := b.NextDelay() if b.Ongoing() { + timer := time.NewTimer(sleepTime) + defer timer.Stop() + select { case <-b.ctx.Done(): - case <-time.After(sleepTime): + case <-timer.C: } } } diff --git a/vendor/github.com/grafana/dskit/crypto/tls/tls.go b/vendor/github.com/grafana/dskit/crypto/tls/tls.go index ba4a0da9..7ed818f3 100644 --- a/vendor/github.com/grafana/dskit/crypto/tls/tls.go +++ b/vendor/github.com/grafana/dskit/crypto/tls/tls.go @@ -8,13 +8,22 @@ import ( "os" "strings" - "google.golang.org/grpc/credentials/insecure" - "github.com/pkg/errors" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) +type SecretReader interface { + ReadSecret(path string) ([]byte, error) +} + +type fileReader struct{} + +func (f *fileReader) ReadSecret(path 
string) ([]byte, error) { + return os.ReadFile(path) +} + // ClientConfig is the config for client TLS. type ClientConfig struct { CertPath string `yaml:"tls_cert_path" category:"advanced"` @@ -24,6 +33,8 @@ type ClientConfig struct { InsecureSkipVerify bool `yaml:"tls_insecure_skip_verify" category:"advanced"` CipherSuites string `yaml:"tls_cipher_suites" category:"advanced" doc:"description_method=GetTLSCipherSuitesLongDescription"` MinVersion string `yaml:"tls_min_version" category:"advanced"` + + Reader SecretReader `yaml:"-"` } var ( @@ -40,9 +51,12 @@ var ( // RegisterFlagsWithPrefix registers flags with prefix. func (cfg *ClientConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.StringVar(&cfg.CertPath, prefix+".tls-cert-path", "", "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.") - f.StringVar(&cfg.KeyPath, prefix+".tls-key-path", "", "Path to the key file for the client certificate. Also requires the client certificate to be configured.") - f.StringVar(&cfg.CAPath, prefix+".tls-ca-path", "", "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.") + // Trim any trailing "." since we include our own here + prefix = strings.TrimRight(prefix, ".") + + f.StringVar(&cfg.CertPath, prefix+".tls-cert-path", "", "Path to the client certificate, which will be used for authenticating with the server. Also requires the key path to be configured.") + f.StringVar(&cfg.KeyPath, prefix+".tls-key-path", "", "Path to the key for the client certificate. Also requires the client certificate to be configured.") + f.StringVar(&cfg.CAPath, prefix+".tls-ca-path", "", "Path to the CA certificates to validate server certificate against. 
If not set, the host's root CA certificates are used.") f.StringVar(&cfg.ServerName, prefix+".tls-server-name", "", "Override the expected name on the server certificate.") f.BoolVar(&cfg.InsecureSkipVerify, prefix+".tls-insecure-skip-verify", false, "Skip validating server certificate.") f.StringVar(&cfg.CipherSuites, prefix+".tls-cipher-suites", "", cfg.GetTLSCipherSuitesShortDescription()) @@ -76,10 +90,16 @@ func (cfg *ClientConfig) GetTLSConfig() (*tls.Config, error) { ServerName: cfg.ServerName, } - // read ca certificates + // If Reader interface not provided, default to reading from File + reader := cfg.Reader + if reader == nil { + reader = &fileReader{} + } + + // Read CA Certificates if cfg.CAPath != "" { var caCertPool *x509.CertPool - caCert, err := os.ReadFile(cfg.CAPath) + caCert, err := reader.ReadSecret(cfg.CAPath) if err != nil { return nil, errors.Wrapf(err, "error loading ca cert: %s", cfg.CAPath) } @@ -89,7 +109,7 @@ func (cfg *ClientConfig) GetTLSConfig() (*tls.Config, error) { config.RootCAs = caCertPool } - // read client certificate + // Read Client Certificate if cfg.CertPath != "" || cfg.KeyPath != "" { if cfg.CertPath == "" { return nil, errCertMissing @@ -97,7 +117,17 @@ func (cfg *ClientConfig) GetTLSConfig() (*tls.Config, error) { if cfg.KeyPath == "" { return nil, errKeyMissing } - clientCert, err := tls.LoadX509KeyPair(cfg.CertPath, cfg.KeyPath) + + cert, err := reader.ReadSecret(cfg.CertPath) + if err != nil { + return nil, errors.Wrapf(err, "error loading client cert: %s", cfg.CertPath) + } + key, err := reader.ReadSecret(cfg.KeyPath) + if err != nil { + return nil, errors.Wrapf(err, "error loading client key: %s", cfg.KeyPath) + } + + clientCert, err := tls.X509KeyPair(cert, key) if err != nil { return nil, errors.Wrapf(err, "failed to load TLS certificate %s,%s", cfg.CertPath, cfg.KeyPath) } diff --git a/vendor/github.com/grafana/dskit/dns/godns/resolver.go b/vendor/github.com/grafana/dskit/dns/godns/resolver.go new file mode 100644 index 00000000..1106f10c --- /dev/null +++ b/vendor/github.com/grafana/dskit/dns/godns/resolver.go @@ -0,0 +1,26 @@ +// Provenance-includes-location: https://github.com/thanos-io/thanos/blob/main/pkg/discovery/dns/godns/resolver.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: The Thanos Authors. + +package godns + +import ( + "net" + + "github.com/pkg/errors" +) + +// Resolver is a wrapper for net.Resolver. +type Resolver struct { + *net.Resolver +} + +// IsNotFound checks whether the DNS record is not found. +func (r *Resolver) IsNotFound(err error) bool { + if err == nil { + return false + } + err = errors.Cause(err) + dnsErr, ok := err.(*net.DNSError) + return ok && dnsErr.IsNotFound +} diff --git a/vendor/github.com/grafana/dskit/dns/miekgdns/lookup.go b/vendor/github.com/grafana/dskit/dns/miekgdns/lookup.go new file mode 100644 index 00000000..11789855 --- /dev/null +++ b/vendor/github.com/grafana/dskit/dns/miekgdns/lookup.go @@ -0,0 +1,157 @@ +// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/be3c082539d8/discovery/dns/dns.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-location: https://github.com/thanos-io/thanos/blob/main/pkg/discovery/dns/miekgdns/provider.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: The Thanos Authors.
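The `SecretReader` hook added to `ClientConfig` above decouples certificate loading from the filesystem: when `Reader` is set, `GetTLSConfig` calls `ReadSecret` instead of `os.ReadFile`. A minimal sketch of plugging in a non-file source, assuming an in-memory store (the `mapSecrets` type and the PEM placeholder are hypothetical):

```go
package main

import (
	"fmt"

	dskittls "github.com/grafana/dskit/crypto/tls"
)

// mapSecrets serves secrets from memory instead of the filesystem.
type mapSecrets map[string][]byte

func (m mapSecrets) ReadSecret(path string) ([]byte, error) {
	data, ok := m[path]
	if !ok {
		return nil, fmt.Errorf("no secret at %q", path)
	}
	return data, nil
}

func main() {
	cfg := dskittls.ClientConfig{
		CAPath: "secrets/ca.pem",
		// With Reader set, the CA bytes come from the map, not the disk.
		Reader: mapSecrets{"secrets/ca.pem": []byte("-----BEGIN CERTIFICATE-----\n...")},
	}
	tlsConfig, err := cfg.GetTLSConfig()
	fmt.Println(tlsConfig, err)
}
```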
+ +package miekgdns + +import ( + "bytes" + "net" + + "github.com/miekg/dns" + "github.com/pkg/errors" +) + +var ErrNoSuchHost = errors.New("no such host") + +// Copied and slightly adjusted from Prometheus DNS SD: +// https://github.com/prometheus/prometheus/blob/be3c082539d85908ce03b6d280f83343e7c930eb/discovery/dns/dns.go#L212 + +// lookupWithSearchPath tries to get an answer for various permutations of +// the given name, appending the system-configured search path as necessary. +// +// There are three possible outcomes: +// +// 1. One of the permutations of the given name is recognized as +// "valid" by the DNS, in which case we consider ourselves "done" +// and that answer is returned. Note that, due to the way the DNS +// handles "name has resource records, but none of the specified type", +// the answer received may have an empty set of results. +// +// 2. All of the permutations of the given name are responded to by one of +// the servers in the "nameservers" list with the answer "that name does +// not exist" (NXDOMAIN). In that case, it can be considered +// pseudo-authoritative that there are no records for that name. +// +// 3. One or more of the names was responded to by all servers with some +// sort of error indication. In that case, we can't know if, in fact, +// there are records for the name or not, so whatever state the +// configuration is in, we should keep it that way until we know for +// sure (by, presumably, all the names getting answers in the future). +// +// Outcomes 1 and 2 are indicated by a valid response message (possibly an +// empty one) and no error. Outcome 3 is indicated by an error return. The +// error will be generic-looking, because trying to return all the errors +// returned by the combination of all name permutations and servers is a +// nightmare. +func (r *Resolver) lookupWithSearchPath(name string, qtype dns.Type) (*dns.Msg, error) { + conf, err := dns.ClientConfigFromFile(r.ResolvConf) + if err != nil { + return nil, errors.Wrapf(err, "could not load resolv.conf: %s", err) + } + + var errs []error + for _, lname := range conf.NameList(name) { + response, err := lookupFromAnyServer(lname, qtype, conf) + if err != nil { + // We can't go home yet, because a later name + // may give us a valid, successful answer. However + // we can no longer say "this name definitely doesn't + // exist", because we did not get that answer for + // at least one name. + errs = append(errs, err) + continue + } + + if response.Rcode == dns.RcodeSuccess { + // Outcome 1: GOLD! + return response, nil + } + } + + if len(errs) == 0 { + // Outcome 2: everyone says NXDOMAIN. + return &dns.Msg{}, ErrNoSuchHost + } + // Outcome 3: boned. + return nil, errors.Errorf("could not resolve %q: all servers responded with errors to at least one search domain. Errs %s", name, fmtErrs(errs)) +} + +// lookupFromAnyServer uses all configured servers to try and resolve a specific +// name. If a viable answer is received from a server, then it is +// immediately returned, otherwise the other servers in the config are +// tried, and if none of them return a viable answer, an error is returned. +// +// A "viable answer" is one which indicates either: +// +// 1. "yes, I know that name, and here are its records of the requested type" +// (RCODE==SUCCESS, ANCOUNT > 0); +// 2. "yes, I know that name, but it has no records of the requested type" +// (RCODE==SUCCESS, ANCOUNT==0); or +// 3. "I know that name doesn't exist" (RCODE==NXDOMAIN). 
+// +// A non-viable answer is "anything else", which encompasses both various +// system-level problems (like network timeouts) and also +// valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc). +func lookupFromAnyServer(name string, qtype dns.Type, conf *dns.ClientConfig) (*dns.Msg, error) { + client := &dns.Client{} + + var errs []error + + // TODO(bwplotka): Worth to do fanout and grab fastest as golang native lib? + for _, server := range conf.Servers { + servAddr := net.JoinHostPort(server, conf.Port) + msg, err := askServerForName(name, qtype, client, servAddr, true) + if err != nil { + errs = append(errs, errors.Wrapf(err, "resolution against server %s for %s", server, name)) + continue + } + + if msg.Rcode == dns.RcodeSuccess || msg.Rcode == dns.RcodeNameError { + return msg, nil + } + } + + return nil, errors.Errorf("could not resolve %s: no servers returned a viable answer. Errs %v", name, fmtErrs(errs)) +} + +func fmtErrs(errs []error) string { + b := bytes.Buffer{} + for _, err := range errs { + b.WriteString(";") + b.WriteString(err.Error()) + } + return b.String() +} + +// askServerForName makes a request to a specific DNS server for a specific +// name (and qtype). Retries with TCP in the event of response truncation, +// but otherwise just sends back whatever the server gave, whether that be a +// valid-looking response, or an error. +func askServerForName(name string, qType dns.Type, client *dns.Client, servAddr string, edns bool) (*dns.Msg, error) { + msg := &dns.Msg{} + + msg.SetQuestion(dns.Fqdn(name), uint16(qType)) + if edns { + msg.SetEdns0(dns.DefaultMsgSize, false) + } + + response, _, err := client.Exchange(msg, servAddr) + if err != nil { + return nil, errors.Wrapf(err, "exchange") + } + + if response.Truncated { + if client.Net == "tcp" { + return nil, errors.New("got truncated message on TCP (64kiB limit exceeded?)") + } + + // TCP fallback. + client.Net = "tcp" + return askServerForName(name, qType, client, servAddr, false) + } + + return response, nil +} diff --git a/vendor/github.com/grafana/dskit/dns/miekgdns/resolver.go b/vendor/github.com/grafana/dskit/dns/miekgdns/resolver.go new file mode 100644 index 00000000..8242adbe --- /dev/null +++ b/vendor/github.com/grafana/dskit/dns/miekgdns/resolver.go @@ -0,0 +1,95 @@ +// Provenance-includes-location: https://github.com/thanos-io/thanos/blob/main/pkg/discovery/dns/miekgdns/resolver.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: The Thanos Authors. + +package miekgdns + +import ( + "context" + "net" + + "github.com/miekg/dns" + "github.com/pkg/errors" +) + +// DefaultResolvConfPath is a common, default resolv.conf file present on linux server. +const DefaultResolvConfPath = "/etc/resolv.conf" + +// Resolver is a drop-in Resolver for *part* of std lib Golang net.DefaultResolver methods. +type Resolver struct { + ResolvConf string +} + +func (r *Resolver) LookupSRV(_ context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) { + var target string + if service == "" && proto == "" { + target = name + } else { + target = "_" + service + "._" + proto + "." 
+ name + } + + response, err := r.lookupWithSearchPath(target, dns.Type(dns.TypeSRV)) + if err != nil { + return "", nil, err + } + + for _, record := range response.Answer { + switch addr := record.(type) { + case *dns.SRV: + addrs = append(addrs, &net.SRV{ + Weight: addr.Weight, + Target: addr.Target, + Priority: addr.Priority, + Port: addr.Port, + }) + default: + return "", nil, errors.Errorf("invalid SRV response record %s", record) + } + } + + return "", addrs, nil +} + +func (r *Resolver) LookupIPAddr(_ context.Context, host string) ([]net.IPAddr, error) { + return r.lookupIPAddr(host, 1, 8) +} + +func (r *Resolver) lookupIPAddr(host string, currIteration, maxIterations int) ([]net.IPAddr, error) { + // We want to protect from infinite loops when resolving DNS records recursively. + if currIteration > maxIterations { + return nil, errors.Errorf("maximum number of recursive iterations reached (%d)", maxIterations) + } + + response, err := r.lookupWithSearchPath(host, dns.Type(dns.TypeAAAA)) + if err != nil || len(response.Answer) == 0 { + // Ugly fallback to A lookup. + response, err = r.lookupWithSearchPath(host, dns.Type(dns.TypeA)) + if err != nil { + return nil, err + } + } + + var resp []net.IPAddr + for _, record := range response.Answer { + switch addr := record.(type) { + case *dns.A: + resp = append(resp, net.IPAddr{IP: addr.A}) + case *dns.AAAA: + resp = append(resp, net.IPAddr{IP: addr.AAAA}) + case *dns.CNAME: + // Recursively resolve it. + addrs, err := r.lookupIPAddr(addr.Target, currIteration+1, maxIterations) + if err != nil { + return nil, errors.Wrapf(err, "recursively resolve %s", addr.Target) + } + resp = append(resp, addrs...) + default: + return nil, errors.Errorf("invalid A, AAAA or CNAME response record %s", record) + } + } + return resp, nil +} + +func (r *Resolver) IsNotFound(err error) bool { + return errors.Is(errors.Cause(err), ErrNoSuchHost) +} diff --git a/vendor/github.com/grafana/dskit/dns/provider.go b/vendor/github.com/grafana/dskit/dns/provider.go new file mode 100644 index 00000000..004111ef --- /dev/null +++ b/vendor/github.com/grafana/dskit/dns/provider.go @@ -0,0 +1,182 @@ +// Provenance-includes-location: https://github.com/thanos-io/thanos/blob/main/pkg/discovery/provider.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: The Thanos Authors. + +package dns + +import ( + "context" + "net" + "strings" + "sync" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/grafana/dskit/dns/godns" + "github.com/grafana/dskit/dns/miekgdns" + "github.com/grafana/dskit/multierror" +) + +// Provider is a stateful cache for asynchronous DNS resolutions. It provides a way to resolve addresses and obtain them. +type Provider struct { + sync.RWMutex + resolver Resolver + // A map from domain name to a slice of resolved targets. 
+ resolved map[string][]string + logger log.Logger + + resolverAddrsDesc *prometheus.Desc + resolverLookupsCount prometheus.Counter + resolverFailuresCount prometheus.Counter +} + +type ResolverType string + +const ( + GolangResolverType ResolverType = "golang" + MiekgdnsResolverType ResolverType = "miekgdns" +) + +func (t ResolverType) ToResolver(logger log.Logger) ipLookupResolver { + var r ipLookupResolver + switch t { + case GolangResolverType: + r = &godns.Resolver{Resolver: net.DefaultResolver} + case MiekgdnsResolverType: + r = &miekgdns.Resolver{ResolvConf: miekgdns.DefaultResolvConfPath} + default: + level.Warn(logger).Log("msg", "no such resolver type, defaulting to golang", "type", t) + r = &godns.Resolver{Resolver: net.DefaultResolver} + } + return r +} + +// NewProvider returns a new empty provider with a given resolver type. +// If the resolver type is empty, net.DefaultResolver is used. +func NewProvider(logger log.Logger, reg prometheus.Registerer, resolverType ResolverType) *Provider { + p := &Provider{ + resolver: NewResolver(resolverType.ToResolver(logger), logger), + resolved: make(map[string][]string), + logger: logger, + resolverAddrsDesc: prometheus.NewDesc( + "dns_provider_results", + "The number of resolved endpoints for each configured address", + []string{"addr"}, + nil), + resolverLookupsCount: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "dns_lookups_total", + Help: "The number of DNS lookup resolution attempts", + }), + resolverFailuresCount: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "dns_failures_total", + Help: "The number of DNS lookup failures", + }), + } + if reg != nil { + reg.MustRegister(p) + } + + return p +} + +// Clone returns a new provider from an existing one. +func (p *Provider) Clone() *Provider { + return &Provider{ + resolver: p.resolver, + resolved: make(map[string][]string), + logger: p.logger, + resolverAddrsDesc: p.resolverAddrsDesc, + resolverLookupsCount: p.resolverLookupsCount, + resolverFailuresCount: p.resolverFailuresCount, + } +} + +// IsDynamicNode returns whether the specified StoreAPI addr uses +// any kind of SD mechanism. +func IsDynamicNode(addr string) bool { + qtype, _ := GetQTypeName(addr) + return qtype != "" +} + +// GetQTypeName splits the provided addr into two parts: the QType (if any) +// and the name. +func GetQTypeName(addr string) (qtype, name string) { + qtypeAndName := strings.SplitN(addr, "+", 2) + if len(qtypeAndName) != 2 { + return "", addr + } + return qtypeAndName[0], qtypeAndName[1] +} + +// Resolve stores a list of provided addresses or their DNS records if requested. +// Addresses prefixed with `dns+` or `dnssrv+` will be resolved through respective DNS lookup (A/AAAA or SRV). +// For non-SRV records, it will return an error if a port is not supplied. +func (p *Provider) Resolve(ctx context.Context, addrs []string) error { + resolvedAddrs := map[string][]string{} + errs := multierror.MultiError{} + + for _, addr := range addrs { + var resolved []string + qtype, name := GetQTypeName(addr) + if qtype == "" { + resolvedAddrs[name] = []string{name} + continue + } + + resolved, err := p.resolver.Resolve(ctx, name, QType(qtype)) + p.resolverLookupsCount.Inc() + if err != nil { + // Append each failed DNS resolution to the error list. + errs.Add(err) + // The DNS resolution failed. Continue without modifying the old records. + p.resolverFailuresCount.Inc() + // Use cached values.
+ p.RLock() + resolved = p.resolved[addr] + p.RUnlock() + } + resolvedAddrs[addr] = resolved + } + + // All addresses have been resolved. We can now take an exclusive lock to + // update the local state. + p.Lock() + defer p.Unlock() + + p.resolved = resolvedAddrs + + return errs.Err() +} + +// Addresses returns the latest addresses present in the Provider. +func (p *Provider) Addresses() []string { + p.RLock() + defer p.RUnlock() + + var result []string + for _, addrs := range p.resolved { + result = append(result, addrs...) + } + return result +} + +// Describe implements prometheus.Collector +func (p *Provider) Describe(ch chan<- *prometheus.Desc) { + ch <- p.resolverAddrsDesc +} + +// Collect implements prometheus.Collector +func (p *Provider) Collect(ch chan<- prometheus.Metric) { + p.RLock() + defer p.RUnlock() + + for name, addrs := range p.resolved { + metric, err := prometheus.NewConstMetric(p.resolverAddrsDesc, prometheus.GaugeValue, float64(len(addrs)), name) + if err == nil { + ch <- metric + } + } +} diff --git a/vendor/github.com/grafana/dskit/dns/resolver.go b/vendor/github.com/grafana/dskit/dns/resolver.go new file mode 100644 index 00000000..d0d4fcfc --- /dev/null +++ b/vendor/github.com/grafana/dskit/dns/resolver.go @@ -0,0 +1,145 @@ +// Provenance-includes-location: https://github.com/thanos-io/thanos/blob/main/pkg/discovery/provider.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: The Thanos Authors. + +package dns + +import ( + "context" + "net" + "strconv" + "strings" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + + "github.com/pkg/errors" +) + +type QType string + +const ( + // A qtype performs A/AAAA lookup. + A = QType("dns") + // SRV qtype performs SRV lookup with A/AAAA lookup for each SRV result. + SRV = QType("dnssrv") + // SRVNoA qtype performs SRV lookup without any A/AAAA lookup for each SRV result. + SRVNoA = QType("dnssrvnoa") +) + +type Resolver interface { + // Resolve performs a DNS lookup and returns a list of records. + // name is the domain name to be resolved. + // qtype is the query type. Accepted values are `dns` for A/AAAA lookup and `dnssrv` for SRV lookup. + // If scheme is passed through name, it is preserved on IP results. + Resolve(ctx context.Context, name string, qtype QType) ([]string, error) +} + +type ipLookupResolver interface { + LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + IsNotFound(err error) bool +} + +type dnsSD struct { + resolver ipLookupResolver + logger log.Logger +} + +// NewResolver creates a resolver with a given underlying resolver. +func NewResolver(resolver ipLookupResolver, logger log.Logger) Resolver { + return &dnsSD{resolver: resolver, logger: logger} +} + +func (s *dnsSD) Resolve(ctx context.Context, name string, qtype QType) ([]string, error) { + var ( + res []string + scheme string + ) + + schemeSplit := strings.Split(name, "//") + if len(schemeSplit) > 1 { + scheme = schemeSplit[0] + name = schemeSplit[1] + } + + // Split the host and port if present. + host, port, err := net.SplitHostPort(name) + if err != nil { + // The host could be missing a port.
+ host, port = name, "" + } + + switch qtype { + case A: + if port == "" { + return nil, errors.Errorf("missing port in address given for dns lookup: %v", name) + } + ips, err := s.resolver.LookupIPAddr(ctx, host) + if err != nil { + // We exclude error from std Golang resolver for the case of the domain (e.g `NXDOMAIN`) not being found by DNS + // server. Since `miekg` does not consider this as an error, when the host cannot be found, empty slice will be + // returned. + if !s.resolver.IsNotFound(err) { + return nil, errors.Wrapf(err, "lookup IP addresses %q", host) + } + if ips == nil { + level.Error(s.logger).Log("msg", "failed to lookup IP addresses", "host", host, "err", err) + } + } + for _, ip := range ips { + res = append(res, appendScheme(scheme, net.JoinHostPort(ip.String(), port))) + } + case SRV, SRVNoA: + _, recs, err := s.resolver.LookupSRV(ctx, "", "", host) + if err != nil { + if !s.resolver.IsNotFound(err) { + return nil, errors.Wrapf(err, "lookup SRV records %q", host) + } + if len(recs) == 0 { + level.Error(s.logger).Log("msg", "failed to lookup SRV records", "host", host, "err", err) + } + } + + for _, rec := range recs { + // Only use port from SRV record if no explicit port was specified. + resPort := port + if resPort == "" { + resPort = strconv.Itoa(int(rec.Port)) + } + + if qtype == SRVNoA { + res = append(res, appendScheme(scheme, net.JoinHostPort(rec.Target, resPort))) + continue + } + // Do A lookup for the domain in SRV answer. + resIPs, err := s.resolver.LookupIPAddr(ctx, rec.Target) + if err != nil { + if !s.resolver.IsNotFound(err) { + return nil, errors.Wrapf(err, "lookup IP addresses %q", host) + } + if len(resIPs) == 0 { + level.Error(s.logger).Log("msg", "failed to lookup IP addresses", "srv", host, "a", rec.Target, "err", err) + } + } + for _, resIP := range resIPs { + res = append(res, appendScheme(scheme, net.JoinHostPort(resIP.String(), resPort))) + } + } + default: + return nil, errors.Errorf("invalid lookup scheme %q", qtype) + } + + if res == nil && err == nil { + level.Warn(s.logger).Log("msg", "IP address lookup yielded no results. No host found or no addresses found", "host", host) + } + + return res, nil +} + +func appendScheme(scheme, host string) string { + if scheme == "" { + return host + } + return scheme + "//" + host +} diff --git a/vendor/github.com/grafana/dskit/flagext/bytes.go b/vendor/github.com/grafana/dskit/flagext/bytes.go index dabb101b..61a5c9d0 100644 --- a/vendor/github.com/grafana/dskit/flagext/bytes.go +++ b/vendor/github.com/grafana/dskit/flagext/bytes.go @@ -8,18 +8,18 @@ import ( "github.com/alecthomas/units" ) -// Bytes is a data type which supports yaml serialization/deserialization -// with units. +// Bytes is a data type which supports use as a flag and yaml +// serialization/deserialization with units. 
type Bytes uint64 -func (b *Bytes) UnmarshalYAML(unmarshal func(interface{}) error) error { - var value string - err := unmarshal(&value) - if err != nil { - return err - } +// String implements flag.Value +func (b *Bytes) String() string { + return units.Base2Bytes(*b).String() +} - bytes, err := units.ParseBase2Bytes(value) +// Set implements flag.Value +func (b *Bytes) Set(s string) error { + bytes, err := units.ParseBase2Bytes(s) if err != nil { return err } @@ -28,6 +28,15 @@ func (b *Bytes) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } -func (b *Bytes) MarshalYAML() (interface{}, error) { - return units.Base2Bytes(*b).String(), nil +func (b *Bytes) UnmarshalYAML(unmarshal func(interface{}) error) error { + var value string + if err := unmarshal(&value); err != nil { + return err + } + + return b.Set(value) +} + +func (b Bytes) MarshalYAML() (interface{}, error) { + return b.String(), nil } diff --git a/vendor/github.com/grafana/dskit/flagext/parse.go b/vendor/github.com/grafana/dskit/flagext/parse.go new file mode 100644 index 00000000..41d1baff --- /dev/null +++ b/vendor/github.com/grafana/dskit/flagext/parse.go @@ -0,0 +1,28 @@ +package flagext + +import ( + "flag" + "fmt" + "os" + "strings" +) + +// ParseFlagsAndArguments calls Parse() on the input flag.FlagSet and returns the parsed arguments. +func ParseFlagsAndArguments(f *flag.FlagSet) ([]string, error) { + err := f.Parse(os.Args[1:]) + return f.Args(), err +} + +// ParseFlagsWithoutArguments calls Parse() on the input flag.FlagSet and enforces no arguments have been parsed. +// This utility should be called whenever we only expect CLI flags but no arguments. +func ParseFlagsWithoutArguments(f *flag.FlagSet) error { + if err := f.Parse(os.Args[1:]); err != nil { + return err + } + + if f.NArg() > 0 { + return fmt.Errorf("the command does not support any argument, but some were provided: %s", strings.Join(f.Args(), " ")) + } + + return nil +} diff --git a/vendor/github.com/grafana/dskit/grpcutil/metadata.go b/vendor/github.com/grafana/dskit/grpcutil/metadata.go new file mode 100644 index 00000000..90f60c54 --- /dev/null +++ b/vendor/github.com/grafana/dskit/grpcutil/metadata.go @@ -0,0 +1,20 @@ +package grpcutil + +import ( + "context" + "strconv" + + "google.golang.org/grpc/metadata" +) + +// MetadataMessageSize is grpc metadata key for message size. +const MetadataMessageSize = "message-size" + +// Sizer can return its size in bytes. +type Sizer interface { + Size() int +} + +func AppendMessageSizeToOutgoingContext(ctx context.Context, req Sizer) context.Context { + return metadata.AppendToOutgoingContext(ctx, MetadataMessageSize, strconv.Itoa(req.Size())) +} diff --git a/vendor/github.com/grafana/dskit/grpcutil/status.go b/vendor/github.com/grafana/dskit/grpcutil/status.go new file mode 100644 index 00000000..a9e9aab2 --- /dev/null +++ b/vendor/github.com/grafana/dskit/grpcutil/status.go @@ -0,0 +1,70 @@ +package grpcutil + +import ( + "context" + "errors" + + "github.com/gogo/status" + "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" +) + +// ErrorToStatus returns a *github.com/gogo/status.Status representation of err. +// +// - If err implements the method `GRPCStatus() *google.golang.org/grpc/status.Status` and +// `GRPCStatus()` does not return nil, or if err wraps a type satisfying this, Status from +// `GRPCStatus()` is converted to gogo Status, and returned. In that case, ok is true. 
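// With Set and String implemented, Bytes satisfies flag.Value and can be
// registered directly on a FlagSet; ParseFlagsWithoutArguments then rejects
// stray positional arguments. The flag name and default are illustrative.
package main

import (
	"flag"
	"fmt"

	"github.com/grafana/dskit/flagext"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	var maxSize flagext.Bytes
	_ = maxSize.Set("512MiB") // default value, parsed with base-2 units
	fs.Var(&maxSize, "max-size", "maximum payload size")

	if err := flagext.ParseFlagsWithoutArguments(fs); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("max-size:", maxSize.String())
}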
+// +// - If err is or GRPCStatus() returns nil, a nil Status is returned and ok is false. +// +// - Otherwise, err is an error not compatible with this function. In this +// case, a nil Status is returned and ok is false. +func ErrorToStatus(err error) (*status.Status, bool) { + if err == nil { + return nil, false + } + type grpcStatus interface{ GRPCStatus() *grpcstatus.Status } + var gs grpcStatus + if errors.As(err, &gs) { + st := gs.GRPCStatus() + if st == nil { + return nil, false + } + return status.FromGRPCStatus(st), true + } + return nil, false +} + +// ErrorToStatusCode extracts gRPC status code from error and returns it. +// +// - If err is nil, codes.OK is returned. +// +// - If err implements (or wraps error that implements) the method +// `GRPCStatus() *google.golang.org/grpc/status.Status`, and +// `GRPCStatus()` returns a non-nil status, code from the status +// is returned. +// +// - Otherwise code.Unknown is returned. +func ErrorToStatusCode(err error) codes.Code { + if err == nil { + return codes.OK + } + type grpcStatus interface{ GRPCStatus() *grpcstatus.Status } + var gs grpcStatus + if errors.As(err, &gs) { + st := gs.GRPCStatus() + if st != nil { + return st.Code() + } + } + return codes.Unknown +} + +// IsCanceled checks whether an error comes from an operation being canceled. +func IsCanceled(err error) bool { + if errors.Is(err, context.Canceled) { + return true + } + statusCode := ErrorToStatusCode(err) + return statusCode == codes.Canceled +} diff --git a/vendor/github.com/grafana/dskit/grpcutil/util.go b/vendor/github.com/grafana/dskit/grpcutil/util.go deleted file mode 100644 index 858a0e86..00000000 --- a/vendor/github.com/grafana/dskit/grpcutil/util.go +++ /dev/null @@ -1,17 +0,0 @@ -package grpcutil - -import ( - "github.com/gogo/status" - "google.golang.org/grpc/codes" -) - -// IsGRPCContextCanceled returns whether the input error is a GRPC error wrapping -// the context.Canceled error. -func IsGRPCContextCanceled(err error) bool { - s, ok := status.FromError(err) - if !ok { - return false - } - - return s.Code() == codes.Canceled -} diff --git a/vendor/github.com/weaveworks/common/httpgrpc/README.md b/vendor/github.com/grafana/dskit/httpgrpc/README.md similarity index 100% rename from vendor/github.com/weaveworks/common/httpgrpc/README.md rename to vendor/github.com/grafana/dskit/httpgrpc/README.md diff --git a/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go new file mode 100644 index 00000000..e1f044d8 --- /dev/null +++ b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go @@ -0,0 +1,150 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/httpgrpc/httpgrpc.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + +package httpgrpc + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + + "github.com/go-kit/log/level" + spb "github.com/gogo/googleapis/google/rpc" + "github.com/gogo/protobuf/types" + "github.com/gogo/status" + "google.golang.org/grpc/metadata" + + "github.com/grafana/dskit/grpcutil" + "github.com/grafana/dskit/log" +) + +const ( + MetadataMethod = "httpgrpc-method" + MetadataURL = "httpgrpc-url" +) + +// AppendRequestMetadataToContext appends metadata of HTTPRequest into gRPC metadata. 
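// How the status helpers classify errors in practice; grpcstatus here is
// google.golang.org/grpc/status, whose errors implement GRPCStatus().
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	grpcstatus "google.golang.org/grpc/status"

	"github.com/grafana/dskit/grpcutil"
)

func main() {
	err := grpcstatus.Error(codes.Canceled, "call canceled")

	fmt.Println(grpcutil.ErrorToStatusCode(err))              // Canceled
	fmt.Println(grpcutil.ErrorToStatusCode(nil))              // OK
	fmt.Println(grpcutil.IsCanceled(err))                     // true
	fmt.Println(grpcutil.IsCanceled(context.Canceled))        // true
	fmt.Println(grpcutil.ErrorToStatusCode(fmt.Errorf("x")))  // Unknown
}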
+func AppendRequestMetadataToContext(ctx context.Context, req *HTTPRequest) context.Context { + return metadata.AppendToOutgoingContext(ctx, + MetadataMethod, req.Method, + MetadataURL, req.Url) +} + +type nopCloser struct { + *bytes.Buffer +} + +func (nopCloser) Close() error { return nil } + +// BytesBuffer returns the underlaying `bytes.buffer` used to build this io.ReadCloser. +func (n nopCloser) BytesBuffer() *bytes.Buffer { return n.Buffer } + +// FromHTTPRequest converts an ordinary http.Request into an httpgrpc.HTTPRequest +func FromHTTPRequest(r *http.Request) (*HTTPRequest, error) { + body, err := io.ReadAll(r.Body) + if err != nil { + return nil, err + } + return &HTTPRequest{ + Method: r.Method, + Url: r.RequestURI, + Body: body, + Headers: FromHeader(r.Header), + }, nil +} + +// ToHTTPRequest converts httpgrpc.HTTPRequest to http.Request. +func ToHTTPRequest(ctx context.Context, r *HTTPRequest) (*http.Request, error) { + req, err := http.NewRequest(r.Method, r.Url, nopCloser{Buffer: bytes.NewBuffer(r.Body)}) + if err != nil { + return nil, err + } + ToHeader(r.Headers, req.Header) + req = req.WithContext(ctx) + req.RequestURI = r.Url + req.ContentLength = int64(len(r.Body)) + return req, nil +} + +// WriteResponse converts an httpgrpc response to an HTTP one +func WriteResponse(w http.ResponseWriter, resp *HTTPResponse) error { + ToHeader(resp.Headers, w.Header()) + w.WriteHeader(int(resp.Code)) + _, err := w.Write(resp.Body) + return err +} + +// WriteError converts an httpgrpc error to an HTTP one +func WriteError(w http.ResponseWriter, err error) { + resp, ok := HTTPResponseFromError(err) + if ok { + _ = WriteResponse(w, resp) + } else { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func ToHeader(hs []*Header, header http.Header) { + for _, h := range hs { + header[h.Key] = h.Values + } +} + +func FromHeader(hs http.Header) []*Header { + result := make([]*Header, 0, len(hs)) + for k, vs := range hs { + result = append(result, &Header{ + Key: k, + Values: vs, + }) + } + return result +} + +// Errorf returns a HTTP gRPC error than is correctly forwarded over +// gRPC, and can eventually be converted back to a HTTP response with +// HTTPResponseFromError. 
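// Round-tripping an error through httpgrpc: Errorf encodes an HTTP status
// code and body into a gRPC status, and HTTPResponseFromError recovers them
// on the other side. The status code and message are illustrative.
package main

import (
	"fmt"

	"github.com/grafana/dskit/httpgrpc"
)

func main() {
	err := httpgrpc.Errorf(429, "too many requests: %s", "tenant-1")

	if resp, ok := httpgrpc.HTTPResponseFromError(err); ok {
		fmt.Println(resp.Code, string(resp.Body)) // 429 too many requests: tenant-1
	}
}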
+func Errorf(code int, tmpl string, args ...interface{}) error { + return ErrorFromHTTPResponse(&HTTPResponse{ + Code: int32(code), + Body: []byte(fmt.Sprintf(tmpl, args...)), + }) +} + +// ErrorFromHTTPResponse converts an HTTP response into a grpc error +func ErrorFromHTTPResponse(resp *HTTPResponse) error { + a, err := types.MarshalAny(resp) + if err != nil { + return err + } + + return status.ErrorProto(&spb.Status{ + Code: resp.Code, + Message: string(resp.Body), + Details: []*types.Any{a}, + }) +} + +// HTTPResponseFromError converts a grpc error into an HTTP response +func HTTPResponseFromError(err error) (*HTTPResponse, bool) { + s, ok := grpcutil.ErrorToStatus(err) + if !ok { + return nil, false + } + + status := s.Proto() + if len(status.Details) != 1 { + return nil, false + } + + var resp HTTPResponse + if err := types.UnmarshalAny(status.Details[0], &resp); err != nil { + level.Error(log.Global()).Log("msg", "got error containing non-response", "err", err) + return nil, false + } + + return &resp, true +} diff --git a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.pb.go similarity index 89% rename from vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go rename to vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.pb.go index 4b3f6ad9..bab0efd5 100644 --- a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go +++ b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: httpgrpc/httpgrpc.proto +// source: httpgrpc.proto package httpgrpc @@ -40,7 +40,7 @@ type HTTPRequest struct { func (m *HTTPRequest) Reset() { *m = HTTPRequest{} } func (*HTTPRequest) ProtoMessage() {} func (*HTTPRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_6670c8e151665986, []int{0} + return fileDescriptor_c50820dbc814fcdd, []int{0} } func (m *HTTPRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ type HTTPResponse struct { func (m *HTTPResponse) Reset() { *m = HTTPResponse{} } func (*HTTPResponse) ProtoMessage() {} func (*HTTPResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6670c8e151665986, []int{1} + return fileDescriptor_c50820dbc814fcdd, []int{1} } func (m *HTTPResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +164,7 @@ type Header struct { func (m *Header) Reset() { *m = Header{} } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_6670c8e151665986, []int{2} + return fileDescriptor_c50820dbc814fcdd, []int{2} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -213,31 +213,29 @@ func init() { proto.RegisterType((*Header)(nil), "httpgrpc.Header") } -func init() { proto.RegisterFile("httpgrpc/httpgrpc.proto", fileDescriptor_6670c8e151665986) } +func init() { proto.RegisterFile("httpgrpc.proto", fileDescriptor_c50820dbc814fcdd) } -var fileDescriptor_6670c8e151665986 = []byte{ - // 322 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x3f, 0x4f, 0xc2, 0x40, - 0x14, 0xc0, 0x7b, 0x14, 0xab, 0x1c, 0x0c, 0xe4, 0xa2, 0xd8, 0x30, 0x5c, 0x08, 0x89, 0x09, 0x71, - 0xa0, 0x09, 0x4e, 0x8e, 0xea, 0xc2, 0xa6, 0xb9, 0x30, 0xb9, 0xb5, 0xf4, 0xa5, 0x35, 0x50, 0x5e, - 0x6d, 0xaf, 0x10, 0x36, 0x3f, 0x82, 0x1f, 0xc3, 0x8f, 0xe2, 0xc8, 0xc8, 0x28, 0xe5, 0x0b, 0xf8, - 0x11, 0xcc, 0x5d, 0x29, 0x36, 0x4e, 0x6e, 0xbf, 0xf7, 0x27, 0xf7, 0x7b, 0xef, 
0x1e, 0xbd, 0x0c, - 0xa5, 0x8c, 0x83, 0x24, 0x9e, 0x3a, 0x25, 0x0c, 0xe3, 0x04, 0x25, 0xb2, 0xb3, 0x32, 0xee, 0x9e, - 0x07, 0x18, 0xa0, 0x4e, 0x3a, 0x8a, 0x8a, 0x7a, 0x7f, 0x45, 0x9b, 0xe3, 0xc9, 0xe4, 0x49, 0xc0, - 0x6b, 0x06, 0xa9, 0x64, 0x1d, 0x6a, 0x45, 0x20, 0x43, 0xf4, 0x6d, 0xd2, 0x23, 0x83, 0x86, 0x38, - 0x44, 0xac, 0x4d, 0xcd, 0x2c, 0x99, 0xdb, 0x35, 0x9d, 0x54, 0xc8, 0xae, 0xe9, 0x69, 0x08, 0xae, - 0x0f, 0x49, 0x6a, 0x9b, 0x3d, 0x73, 0xd0, 0x1c, 0xb5, 0x87, 0x47, 0xf5, 0x58, 0x17, 0x44, 0xd9, - 0xc0, 0x18, 0xad, 0x7b, 0xe8, 0xaf, 0xed, 0x7a, 0x8f, 0x0c, 0x5a, 0x42, 0x73, 0xdf, 0xa3, 0xad, - 0x42, 0x9c, 0xc6, 0xb8, 0x48, 0x41, 0xf5, 0x3c, 0xa0, 0x0f, 0xda, 0x7b, 0x22, 0x34, 0x57, 0x1d, - 0xb5, 0xff, 0x3a, 0xcc, 0x8a, 0x63, 0x44, 0xad, 0xa2, 0x4d, 0xcd, 0x3f, 0x83, 0xf5, 0x61, 0x29, - 0x85, 0x6a, 0xd3, 0xa5, 0x3b, 0xcf, 0xa0, 0x78, 0xba, 0x21, 0x0e, 0xd1, 0xe8, 0x8e, 0xd6, 0xd5, - 0x5c, 0xec, 0x96, 0x5a, 0x63, 0x77, 0xe1, 0xcf, 0x81, 0x5d, 0x54, 0xa4, 0xbf, 0x5f, 0xd5, 0xed, - 0xfc, 0x4d, 0x17, 0x8b, 0xf4, 0x8d, 0xfb, 0xc7, 0xed, 0x8e, 0x1b, 0xdf, 0x3b, 0x4e, 0xde, 0x72, - 0x4e, 0x3e, 0x72, 0x4e, 0x3e, 0x73, 0x4e, 0x36, 0x39, 0x27, 0x5f, 0x39, 0x27, 0xef, 0x7b, 0x6e, - 0x6c, 0xf6, 0xdc, 0xd8, 0xee, 0xb9, 0xf1, 0x7c, 0x15, 0xbc, 0xc8, 0x30, 0xf3, 0x86, 0x53, 0x8c, - 0x9c, 0x15, 0xb8, 0x4b, 0x58, 0x61, 0x32, 0x4b, 0x9d, 0x29, 0x46, 0x11, 0x2e, 0x8e, 0xa7, 0xf4, - 0x2c, 0x7d, 0xab, 0x9b, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, 0xb5, 0xfe, 0x5d, 0xe6, 0x01, - 0x00, 0x00, +var fileDescriptor_c50820dbc814fcdd = []byte{ + // 301 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xbd, 0x4e, 0xc3, 0x30, + 0x14, 0x85, 0xed, 0xa6, 0x04, 0xea, 0x56, 0xa8, 0xb2, 0xa0, 0x8a, 0x3a, 0x5c, 0x55, 0x99, 0x22, + 0x86, 0x22, 0x05, 0x16, 0x46, 0x60, 0xc9, 0x88, 0xac, 0xbe, 0x40, 0x42, 0xac, 0x44, 0x22, 0xd4, + 0x21, 0x3f, 0xa0, 0x6e, 0x3c, 0x02, 0x8f, 0xc1, 0xa3, 0x30, 0x66, 0xec, 0x48, 0x9c, 0x85, 0xb1, + 0x8f, 0x80, 0xec, 0xa4, 0x10, 0x31, 0xb1, 0x9d, 0x7b, 0xee, 0x51, 0xbe, 0x7b, 0x62, 0x72, 0x1c, + 0x17, 0x45, 0x1a, 0x65, 0xe9, 0xfd, 0x32, 0xcd, 0x44, 0x21, 0xe8, 0xd1, 0x7e, 0x9e, 0x9f, 0x44, + 0x22, 0x12, 0xda, 0x3c, 0x57, 0xaa, 0xdd, 0xdb, 0x2f, 0x64, 0xec, 0xad, 0x56, 0x77, 0x8c, 0x3f, + 0x95, 0x3c, 0x2f, 0xe8, 0x8c, 0x98, 0x8f, 0xbc, 0x88, 0x45, 0x68, 0xe1, 0x05, 0x76, 0x46, 0xac, + 0x9b, 0xe8, 0x94, 0x18, 0x65, 0x96, 0x58, 0x03, 0x6d, 0x2a, 0x49, 0xcf, 0xc8, 0x61, 0xcc, 0xfd, + 0x90, 0x67, 0xb9, 0x65, 0x2c, 0x0c, 0x67, 0xec, 0x4e, 0x97, 0x3f, 0x68, 0x4f, 0x2f, 0xd8, 0x3e, + 0x40, 0x29, 0x19, 0x06, 0x22, 0xdc, 0x58, 0xc3, 0x05, 0x76, 0x26, 0x4c, 0x6b, 0x3b, 0x20, 0x93, + 0x16, 0x9c, 0xa7, 0x62, 0x9d, 0x73, 0x95, 0xb9, 0x15, 0x21, 0xd7, 0xdc, 0x03, 0xa6, 0x75, 0x9f, + 0x31, 0xf8, 0x2f, 0xc3, 0xe8, 0x31, 0x5c, 0x62, 0xb6, 0x31, 0x75, 0xff, 0x03, 0xdf, 0x74, 0xa5, + 0x94, 0x54, 0x4d, 0x9f, 0xfd, 0xa4, 0xe4, 0xed, 0xa7, 0x47, 0xac, 0x9b, 0xdc, 0x6b, 0x32, 0x54, + 0x77, 0xd1, 0x2b, 0x62, 0x7a, 0xfe, 0x3a, 0x4c, 0x38, 0x3d, 0xed, 0x41, 0x7f, 0x7f, 0xd5, 0x7c, + 0xf6, 0xd7, 0x6e, 0x8b, 0xd8, 0xe8, 0xe6, 0xb2, 0xaa, 0x01, 0x6d, 0x6b, 0x40, 0xbb, 0x1a, 0xf0, + 0xab, 0x04, 0xfc, 0x2e, 0x01, 0x7f, 0x48, 0xc0, 0x95, 0x04, 0xfc, 0x29, 0x01, 0x7f, 0x49, 0x40, + 0x3b, 0x09, 0xf8, 0xad, 0x01, 0x54, 0x35, 0x80, 0xb6, 0x0d, 0xa0, 0xc0, 0xd4, 0x0f, 0x72, 0xf1, + 0x1d, 0x00, 0x00, 0xff, 0xff, 0x44, 0x0e, 0x7c, 0xff, 0xc2, 0x01, 0x00, 0x00, } func (this *HTTPRequest) Equal(that interface{}) bool { @@ -471,7 +469,7 @@ var _HTTP_serviceDesc = 
grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "httpgrpc/httpgrpc.proto", + Metadata: "httpgrpc.proto", } func (m *HTTPRequest) Marshal() (dAtA []byte, err error) { @@ -1204,7 +1202,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { func skipHttpgrpc(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1236,8 +1233,10 @@ func skipHttpgrpc(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1258,30 +1257,55 @@ func skipHttpgrpc(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthHttpgrpc } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthHttpgrpc + } + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupHttpgrpc + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttpgrpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipHttpgrpc(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthHttpgrpc + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthHttpgrpc - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthHttpgrpc = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowHttpgrpc = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupHttpgrpc = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthHttpgrpc = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowHttpgrpc = fmt.Errorf("proto: integer overflow") ) diff --git a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.proto b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.proto similarity index 92% rename from vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.proto rename to vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.proto index 0313dbb6..8f546330 100644 --- a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.proto +++ b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.proto @@ -11,7 +11,6 @@ option (gogoproto.goproto_stringer_all) = false; option (gogoproto.goproto_unkeyed_all) = false; option (gogoproto.goproto_unrecognized_all) = false; option (gogoproto.goproto_sizecache_all) = false; -option go_package = "github.com/weaveworks/common/httpgrpc"; service HTTP { rpc Handle(HTTPRequest) returns (HTTPResponse) {}; diff --git a/vendor/github.com/weaveworks/common/httpgrpc/server/server.go b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go similarity index 57% rename from vendor/github.com/weaveworks/common/httpgrpc/server/server.go rename to vendor/github.com/grafana/dskit/httpgrpc/server/server.go index f5ee305b..c642f7fa 100644 --- a/vendor/github.com/weaveworks/common/httpgrpc/server/server.go +++ b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go @@ -1,25 +1,35 @@ +// Provenance-includes-location: 
https://github.com/weaveworks/common/blob/main/httpgrpc/server/server.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + package server import ( - "bytes" + "context" "fmt" - "io/ioutil" "net" "net/http" "net/http/httptest" "net/url" "strings" - "sync" + "github.com/go-kit/log/level" otgrpc "github.com/opentracing-contrib/go-grpc" "github.com/opentracing/opentracing-go" - "github.com/sercand/kuberesolver/v4" - "golang.org/x/net/context" + "github.com/sercand/kuberesolver/v5" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/grafana/dskit/httpgrpc" + "github.com/grafana/dskit/log" + "github.com/grafana/dskit/middleware" +) - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/logging" - "github.com/weaveworks/common/middleware" +var ( + // DoNotLogErrorHeaderKey is a header key used for marking non-loggable errors. More precisely, if an HTTP response + // has a status code 5xx, and contains a header with key DoNotLogErrorHeaderKey and any values, the generated error + // will be marked as non-loggable. + DoNotLogErrorHeaderKey = http.CanonicalHeaderKey("X-DoNotLogError") ) // Server implements HTTPServer. HTTPServer is a generated interface that gRPC @@ -35,51 +45,46 @@ func NewServer(handler http.Handler) *Server { } } -type nopCloser struct { - *bytes.Buffer -} - -func (nopCloser) Close() error { return nil } - -// BytesBuffer returns the underlaying `bytes.buffer` used to build this io.ReadCloser. -func (n nopCloser) BytesBuffer() *bytes.Buffer { return n.Buffer } - // Handle implements HTTPServer. func (s Server) Handle(ctx context.Context, r *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { - req, err := http.NewRequest(r.Method, r.Url, nopCloser{Buffer: bytes.NewBuffer(r.Body)}) + req, err := httpgrpc.ToHTTPRequest(ctx, r) if err != nil { return nil, err } - toHeader(r.Headers, req.Header) - req = req.WithContext(ctx) - req.RequestURI = r.Url - req.ContentLength = int64(len(r.Body)) recorder := httptest.NewRecorder() s.handler.ServeHTTP(recorder, req) + header := recorder.Header() + + doNotLogError := false + if _, ok := header[DoNotLogErrorHeaderKey]; ok { + doNotLogError = true + header.Del(DoNotLogErrorHeaderKey) // remove before converting to httpgrpc resp + } + resp := &httpgrpc.HTTPResponse{ Code: int32(recorder.Code), - Headers: fromHeader(recorder.Header()), + Headers: httpgrpc.FromHeader(header), Body: recorder.Body.Bytes(), } if recorder.Code/100 == 5 { - return nil, httpgrpc.ErrorFromHTTPResponse(resp) + err := httpgrpc.ErrorFromHTTPResponse(resp) + if doNotLogError { + err = middleware.DoNotLogError{Err: err} + } + return nil, err } return resp, nil } // Client is a http.Handler that forwards the request over gRPC. type Client struct { - mtx sync.RWMutex - service string - namespace string - port string - client httpgrpc.HTTPClient - conn *grpc.ClientConn + client httpgrpc.HTTPClient + conn *grpc.ClientConn } // ParseURL deals with direct:// style URLs, as well as kubernetes:// urls. -// For backwards compatibility it treats URLs without schems as kubernetes://. +// For backwards compatibility it treats URLs without schemes as kubernetes://. func ParseURL(unparsed string) (string, error) { // if it has :///, this is the kuberesolver v2 URL. Return it as it is. 
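// Handlers running behind the httpgrpc Server can opt out of error logging on
// 5xx responses by setting DoNotLogErrorHeaderKey; Handle strips the header and
// wraps the resulting error in middleware.DoNotLogError. A minimal sketch:
package main

import (
	"net/http"

	"github.com/grafana/dskit/httpgrpc/server"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Mark the error as non-loggable before writing the 5xx status.
		w.Header().Set(server.DoNotLogErrorHeaderKey, "true")
		http.Error(w, "expected overload, don't log me", http.StatusServiceUnavailable)
	})
	srv := server.NewServer(h)
	_ = srv // typically registered as the gRPC HTTPServer implementation
}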
if strings.Contains(unparsed, ":///") { @@ -106,15 +111,15 @@ func ParseURL(unparsed string) (string, error) { return "", err } parts := strings.SplitN(host, ".", 3) - service, namespace, domain := parts[0], "default", "" + service, domain := parts[0], "" if len(parts) > 1 { - namespace = parts[1] + namespace := parts[1] domain = "." + namespace } if len(parts) > 2 { domain = domain + "." + parts[2] } - address := fmt.Sprintf("kubernetes:///%s%s:%s", service, domain, port) + address := fmt.Sprintf("kubernetes:///%s", net.JoinHostPort(service+domain, port)) return address, nil default: @@ -134,7 +139,7 @@ func NewClient(address string) (*Client, error) { dialOptions := []grpc.DialOption{ grpc.WithDefaultServiceConfig(grpcServiceConfig), - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithChainUnaryInterceptor( otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), middleware.ClientUserHeaderInterceptor, @@ -152,49 +157,17 @@ func NewClient(address string) (*Client, error) { }, nil } -// HTTPRequest wraps an ordinary HTTPRequest with a gRPC one -func HTTPRequest(r *http.Request) (*httpgrpc.HTTPRequest, error) { - body, err := ioutil.ReadAll(r.Body) - if err != nil { - return nil, err - } - return &httpgrpc.HTTPRequest{ - Method: r.Method, - Url: r.RequestURI, - Body: body, - Headers: fromHeader(r.Header), - }, nil -} - -// WriteResponse converts an httpgrpc response to an HTTP one -func WriteResponse(w http.ResponseWriter, resp *httpgrpc.HTTPResponse) error { - toHeader(resp.Headers, w.Header()) - w.WriteHeader(int(resp.Code)) - _, err := w.Write(resp.Body) - return err -} - -// WriteError converts an httpgrpc error to an HTTP one -func WriteError(w http.ResponseWriter, err error) { - resp, ok := httpgrpc.HTTPResponseFromError(err) - if ok { - WriteResponse(w, resp) - } else { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} - // ServeHTTP implements http.Handler func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) { if tracer := opentracing.GlobalTracer(); tracer != nil { if span := opentracing.SpanFromContext(r.Context()); span != nil { if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(r.Header)); err != nil { - logging.Global().Warnf("Failed to inject tracing headers into request: %v", err) + level.Warn(log.Global()).Log("msg", "failed to inject tracing headers into request", "err", err) } } } - req, err := HTTPRequest(r) + req, err := httpgrpc.FromHTTPRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -211,25 +184,8 @@ func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } - if err := WriteResponse(w, resp); err != nil { + if err := httpgrpc.WriteResponse(w, resp); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } } - -func toHeader(hs []*httpgrpc.Header, header http.Header) { - for _, h := range hs { - header[h.Key] = h.Values - } -} - -func fromHeader(hs http.Header) []*httpgrpc.Header { - result := make([]*httpgrpc.Header, 0, len(hs)) - for k, vs := range hs { - result = append(result, &httpgrpc.Header{ - Key: k, - Values: vs, - }) - } - return result -} diff --git a/vendor/github.com/grafana/dskit/httpgrpc/tools.go b/vendor/github.com/grafana/dskit/httpgrpc/tools.go new file mode 100644 index 00000000..9117d39b --- /dev/null +++ b/vendor/github.com/grafana/dskit/httpgrpc/tools.go @@ -0,0 +1,10 @@ +// Provenance-includes-location: 
https://github.com/weaveworks/common/blob/main/httpgrpc/tools.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + +package httpgrpc + +import ( + // This is a workaround for go mod which fails to download gogoproto otherwise + _ "github.com/gogo/protobuf/gogoproto" +) diff --git a/vendor/github.com/weaveworks/common/instrument/instrument.go b/vendor/github.com/grafana/dskit/instrument/instrument.go similarity index 79% rename from vendor/github.com/weaveworks/common/instrument/instrument.go rename to vendor/github.com/grafana/dskit/instrument/instrument.go index 07aa033c..4ea480b2 100644 --- a/vendor/github.com/weaveworks/common/instrument/instrument.go +++ b/vendor/github.com/grafana/dskit/instrument/instrument.go @@ -1,3 +1,9 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/instrument/instrument.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + +//lint:file-ignore faillint Changing from prometheus to promauto package would be a breaking change for consumers + package instrument import ( @@ -8,11 +14,10 @@ import ( "github.com/opentracing/opentracing-go/ext" otlog "github.com/opentracing/opentracing-go/log" "github.com/prometheus/client_golang/prometheus" - oldcontext "golang.org/x/net/context" - "github.com/weaveworks/common/grpc" - "github.com/weaveworks/common/tracing" - "github.com/weaveworks/common/user" + "github.com/grafana/dskit/grpcutil" + "github.com/grafana/dskit/tracing" + "github.com/grafana/dskit/user" ) // DefBuckets are histogram buckets for the response time (in seconds) @@ -53,7 +58,7 @@ func (c *HistogramCollector) Register() { } // Before collects for the upcoming request. -func (c *HistogramCollector) Before(ctx context.Context, method string, start time.Time) { +func (c *HistogramCollector) Before(context.Context, string, time.Time) { } // After collects when the request is done. @@ -131,13 +136,13 @@ func (c *JobCollector) Register() { } // Before collects for the upcoming request. -func (c *JobCollector) Before(ctx context.Context, method string, start time.Time) { +func (c *JobCollector) Before(_ context.Context, method string, start time.Time) { c.start.WithLabelValues(method).Set(float64(start.UTC().Unix())) c.started.WithLabelValues(method).Inc() } // After collects when the request is done. -func (c *JobCollector) After(ctx context.Context, method, statusCode string, start time.Time) { +func (c *JobCollector) After(_ context.Context, method, statusCode string, start time.Time) { end := time.Now() c.end.WithLabelValues(method, statusCode).Set(float64(end.UTC().Unix())) c.duration.WithLabelValues(method, statusCode).Set(end.Sub(start).Seconds()) @@ -168,7 +173,7 @@ func CollectedRequest(ctx context.Context, method string, col Collector, toStatu col.After(newCtx, method, toStatusCode(err), start) if err != nil { - if !grpc.IsCanceled(err) { + if !grpcutil.IsCanceled(err) { ext.Error.Set(sp, true) } sp.LogFields(otlog.Error(err)) @@ -185,21 +190,3 @@ func ErrorCode(err error) string { } return "500" } - -// TimeRequestHistogram runs 'f' and records how long it took in the given Prometheus -// histogram metric. If 'f' returns successfully, record a "200". Otherwise, record -// "500". It will also emit an OpenTracing span if you have a global tracer configured. 
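// Timing a unit of work with CollectedRequest. The histogram label names
// ("operation", "status_code") follow the convention HistogramCollector's
// consumers use; treat the metric name and labels as illustrative.
package main

import (
	"context"
	"time"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/grafana/dskit/instrument"
)

func main() {
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "example_request_duration_seconds",
		Help:    "Time spent on example requests.",
		Buckets: instrument.DefBuckets,
	}, []string{"operation", "status_code"})
	prometheus.MustRegister(duration)

	col := instrument.NewHistogramCollector(duration)
	_ = instrument.CollectedRequest(context.Background(), "Example.Do", col, instrument.ErrorCode,
		func(ctx context.Context) error {
			time.Sleep(10 * time.Millisecond) // the instrumented work
			return nil
		})
}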
-// -// Deprecated: Use CollectedRequest() -func TimeRequestHistogram(ctx oldcontext.Context, method string, metric *prometheus.HistogramVec, f func(context.Context) error) error { - return CollectedRequest(ctx, method, NewHistogramCollector(metric), ErrorCode, f) -} - -// TimeRequestHistogramStatus runs 'f' and records how long it took in the given Prometheus -// histogram metric. If 'f' returns successfully, record a "200". Otherwise, record -// "500". It will also emit an OpenTracing span if you have a global tracer configured. -// -// Deprecated: Use CollectedRequest() -func TimeRequestHistogramStatus(ctx oldcontext.Context, method string, metric *prometheus.HistogramVec, toStatusCode func(error) string, f func(context.Context) error) error { - return CollectedRequest(ctx, method, NewHistogramCollector(metric), toStatusCode, f) -} diff --git a/vendor/github.com/grafana/dskit/kv/client.go b/vendor/github.com/grafana/dskit/kv/client.go index 42bf5595..0599f670 100644 --- a/vendor/github.com/grafana/dskit/kv/client.go +++ b/vendor/github.com/grafana/dskit/kv/client.go @@ -105,13 +105,16 @@ type Client interface { // be returned if the key does not exist. Delete(ctx context.Context, key string) error - // CAS stands for Compare-And-Swap. Will call provided callback f with the + // CAS stands for Compare-And-Swap. Will call provided callback f with the // current value of the key and allow callback to return a different value. // Will then attempt to atomically swap the current value for the new value. // If that doesn't succeed will try again - callback will be called again - // with new value etc. Guarantees that only a single concurrent CAS - // succeeds. Callback can return nil to indicate it is happy with existing + // with new value etc. Guarantees that only a single concurrent CAS + // succeeds. Callback can return nil to indicate it is happy with existing // value. + // + // If the callback returns an error and true for retry, and the max number of + // attempts is not exceeded, the operation will be retried. CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error // WatchKey calls f whenever the value stored under key changes. diff --git a/vendor/github.com/grafana/dskit/kv/consul/client.go b/vendor/github.com/grafana/dskit/kv/consul/client.go index c7ef8d37..5501a67d 100644 --- a/vendor/github.com/grafana/dskit/kv/consul/client.go +++ b/vendor/github.com/grafana/dskit/kv/consul/client.go @@ -14,9 +14,10 @@ import ( consul "github.com/hashicorp/consul/api" "github.com/hashicorp/go-cleanhttp" "github.com/prometheus/client_golang/prometheus" - "github.com/weaveworks/common/instrument" "golang.org/x/time/rate" + "github.com/grafana/dskit/instrument" + "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/kv/codec" @@ -252,7 +253,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b } if kvp == nil { - level.Info(c.logger).Log("msg", "value is nil", "key", key, "index", index) + level.Debug(c.logger).Log("msg", "value is nil", "key", key, "index", index) continue } @@ -401,7 +402,7 @@ func (c *Client) createRateLimiter() *rate.Limiter { // WithCodec Clones and changes the codec of the consul client. 
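// The CAS retry contract in practice: return the updated value plus retry=true
// so the callback is re-run on contention, up to the client's attempt limit.
// The map-based value type and key are hypothetical and assume a codec that
// round-trips map[string]int.
package example

import (
	"context"

	"github.com/grafana/dskit/kv"
)

// bumpCounter atomically increments a counter stored under key.
func bumpCounter(ctx context.Context, client kv.Client, key string) error {
	return client.CAS(ctx, key, func(in interface{}) (interface{}, bool, error) {
		counters, _ := in.(map[string]int)
		if counters == nil {
			counters = map[string]int{}
		}
		counters["requests"]++
		return counters, true, nil // retry=true: run again if the swap loses a race
	})
}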
func (c *Client) WithCodec(codec codec.Codec) *Client { - new := *c - new.codec = codec - return &new + n := *c + n.codec = codec + return &n } diff --git a/vendor/github.com/grafana/dskit/kv/consul/metrics.go b/vendor/github.com/grafana/dskit/kv/consul/metrics.go index 52a1d4e8..166e79bc 100644 --- a/vendor/github.com/grafana/dskit/kv/consul/metrics.go +++ b/vendor/github.com/grafana/dskit/kv/consul/metrics.go @@ -6,7 +6,8 @@ import ( consul "github.com/hashicorp/consul/api" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/weaveworks/common/instrument" + + "github.com/grafana/dskit/instrument" ) type consulInstrumentation struct { diff --git a/vendor/github.com/grafana/dskit/kv/consul/mock.go b/vendor/github.com/grafana/dskit/kv/consul/mock.go index f1f6937f..e2f434b6 100644 --- a/vendor/github.com/grafana/dskit/kv/consul/mock.go +++ b/vendor/github.com/grafana/dskit/kv/consul/mock.go @@ -95,7 +95,7 @@ func (m *mockKV) loop() { } } -func (m *mockKV) Put(p *consul.KVPair, q *consul.WriteOptions) (*consul.WriteMeta, error) { +func (m *mockKV) Put(p *consul.KVPair, _ *consul.WriteOptions) (*consul.WriteMeta, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -119,7 +119,7 @@ func (m *mockKV) Put(p *consul.KVPair, q *consul.WriteOptions) (*consul.WriteMet return nil, nil } -func (m *mockKV) CAS(p *consul.KVPair, q *consul.WriteOptions) (bool, *consul.WriteMeta, error) { +func (m *mockKV) CAS(p *consul.KVPair, _ *consul.WriteOptions) (bool, *consul.WriteMeta, error) { level.Debug(m.logger).Log("msg", "CAS", "key", p.Key, "modify_index", p.ModifyIndex, "value", fmt.Sprintf("%.40q", p.Value)) m.mtx.Lock() @@ -226,7 +226,7 @@ func (m *mockKV) List(prefix string, q *consul.QueryOptions) (consul.KVPairs, *c return result, &consul.QueryMeta{LastIndex: m.current}, nil } -func (m *mockKV) Delete(key string, q *consul.WriteOptions) (*consul.WriteMeta, error) { +func (m *mockKV) Delete(key string, _ *consul.WriteOptions) (*consul.WriteMeta, error) { m.mtx.Lock() defer m.mtx.Unlock() delete(m.kvps, key) diff --git a/vendor/github.com/grafana/dskit/kv/etcd/mock.go b/vendor/github.com/grafana/dskit/kv/etcd/mock.go index b7ee2764..6349cee1 100644 --- a/vendor/github.com/grafana/dskit/kv/etcd/mock.go +++ b/vendor/github.com/grafana/dskit/kv/etcd/mock.go @@ -62,11 +62,11 @@ func newMockKV() *mockKV { // // Known limitations: // -// * Compact is not implemented and will panic -// * RequestProgress is not implemented and will panic -// * Only exact and prefix matching is supported for Get, Put, and Delete -// * There may be inconsistencies with how various version numbers are adjusted -// but none that are exposed by kv.Client unit tests +// - Compact is not implemented and will panic +// - RequestProgress is not implemented and will panic +// - Only exact and prefix matching is supported for Get, Put, and Delete +// - There may be inconsistencies with how various version numbers are adjusted +// but none that are exposed by kv.Client unit tests type mockKV struct { // Key-value pairs created by put calls or transactions values map[string]mvccpb.KeyValue diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go index d18b6d45..693964b5 100644 --- a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go +++ b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go @@ -72,7 +72,7 @@ func (c *Client) Get(ctx context.Context, key string) 
(interface{}, error) { } // Delete is part of kv.Client interface. -func (c *Client) Delete(ctx context.Context, key string) error { +func (c *Client) Delete(_ context.Context, _ string) error { return errors.New("memberlist does not support Delete") } @@ -164,9 +164,7 @@ type KVConfig struct { TCPTransport TCPTransportConfig `yaml:",inline"` - // Where to put custom metrics. Metrics are not registered, if this is nil. - MetricsRegisterer prometheus.Registerer `yaml:"-"` - MetricsNamespace string `yaml:"-"` + MetricsNamespace string `yaml:"-"` // Codecs to register. Codecs need to be registered before joining other members. Codecs []codec.Codec `yaml:"-"` @@ -224,7 +222,7 @@ func generateRandomSuffix(logger log.Logger) string { // If joining of the cluster if configured, it is done in Running state, and if join fails and Abort flag is set, service // fails. type KV struct { - services.Service + services.NamedService cfg KVConfig logger log.Logger @@ -354,7 +352,6 @@ var ( // trigger connecting to the existing memberlist cluster. If that fails and AbortIfJoinFails is true, error is returned // and service enters Failed state. func NewKV(cfg KVConfig, logger log.Logger, dnsProvider DNSProvider, registerer prometheus.Registerer) *KV { - cfg.TCPTransport.MetricsRegisterer = cfg.MetricsRegisterer cfg.TCPTransport.MetricsNamespace = cfg.MetricsNamespace mlkv := &KV{ @@ -377,7 +374,8 @@ func NewKV(cfg KVConfig, logger log.Logger, dnsProvider DNSProvider, registerer mlkv.codecs[c.CodecID()] = c } - mlkv.Service = services.NewBasicService(mlkv.starting, mlkv.running, mlkv.stopping) + mlkv.NamedService = services.NewBasicService(mlkv.starting, mlkv.running, mlkv.stopping).WithName("memberlist_kv") + return mlkv } @@ -386,7 +384,7 @@ func defaultMemberlistConfig() *memberlist.Config { } func (m *KV) buildMemberlistConfig() (*memberlist.Config, error) { - tr, err := NewTCPTransport(m.cfg.TCPTransport, m.logger) + tr, err := NewTCPTransport(m.cfg.TCPTransport, m.logger, m.registerer) if err != nil { return nil, fmt.Errorf("failed to create transport: %v", err) } @@ -488,17 +486,17 @@ func (m *KV) running(ctx context.Context) error { tickerChan = t.C } + logger := log.With(m.logger, "phase", "periodic_rejoin") for { select { case <-tickerChan: - members := m.discoverMembers(ctx, m.cfg.JoinMembers) - - reached, err := m.memberlist.Join(members) + const numAttempts = 1 // don't retry if resolution fails, we will try again next time + reached, err := m.joinMembersWithRetries(ctx, numAttempts, logger) if err == nil { - level.Info(m.logger).Log("msg", "re-joined memberlist cluster", "reached_nodes", reached) + level.Info(logger).Log("msg", "re-joined memberlist cluster", "reached_nodes", reached) } else { // Don't report error from rejoin, otherwise KV service would be stopped completely. - level.Warn(m.logger).Log("msg", "re-joining memberlist cluster failed", "err", err) + level.Warn(logger).Log("msg", "re-joining memberlist cluster failed", "err", err, "next_try_in", m.cfg.RejoinInterval) } case <-ctx.Done(): @@ -543,7 +541,7 @@ func (m *KV) fastJoinMembersOnStartup(ctx context.Context) { level.Info(m.logger).Log("msg", "memberlist fast-join starting", "nodes_found", len(nodes), "to_join", toJoin) totalJoined := 0 - for toJoin > 0 && len(nodes) > 0 { + for toJoin > 0 && len(nodes) > 0 && ctx.Err() == nil { reached, err := m.memberlist.Join(nodes[0:1]) // Try to join single node only. 
if err != nil { level.Debug(m.logger).Log("msg", "fast-joining node failed", "node", nodes[0], "err", err) @@ -571,41 +569,122 @@ func (m *KV) joinMembersOnStartup(ctx context.Context) bool { return true } + logger := log.With(m.logger, "phase", "startup") + level.Info(logger).Log("msg", "joining memberlist cluster", "join_members", strings.Join(m.cfg.JoinMembers, ",")) startTime := time.Now() + reached, err := m.joinMembersWithRetries(ctx, m.cfg.MaxJoinRetries, logger) + if err != nil { + level.Error(logger).Log("msg", "joining memberlist cluster failed", "err", err, "elapsed_time", time.Since(startTime)) + return false + } + level.Info(logger).Log("msg", "joining memberlist cluster succeeded", "reached_nodes", reached, "elapsed_time", time.Since(startTime)) + return true +} - level.Info(m.logger).Log("msg", "joining memberlist cluster", "join_members", strings.Join(m.cfg.JoinMembers, ",")) - - cfg := backoff.Config{ - MinBackoff: m.cfg.MinJoinBackoff, - MaxBackoff: m.cfg.MaxJoinBackoff, - MaxRetries: m.cfg.MaxJoinRetries, +// joinMembersWithRetries joins m.cfg.JoinMembers 100 at a time. After each batch of 100 it rediscoveres the members. +// This helps when the list of members is big and by the time we reach the end the originally resolved addresses may be obsolete. +// joinMembersWithRetries returns an error iff it couldn't successfully join any node OR the context was cancelled. +func (m *KV) joinMembersWithRetries(ctx context.Context, numAttempts int, logger log.Logger) (int, error) { + var ( + cfg = backoff.Config{ + MinBackoff: m.cfg.MinJoinBackoff, + MaxBackoff: m.cfg.MaxJoinBackoff, + MaxRetries: numAttempts, + } + boff = backoff.New(ctx, cfg) + err error + successfullyJoined = 0 + ) + + for ; boff.Ongoing(); boff.Wait() { + successfullyJoined, err = m.joinMembersInBatches(ctx) + if successfullyJoined > 0 { + // If there are _some_ successful joins, then we can consider the join done. + // Mimicking the Join semantics we return an error only when we couldn't join any node at all + err = nil + break + } + level.Warn(logger).Log("msg", "joining memberlist cluster", "attempts", boff.NumRetries()+1, "max_attempts", numAttempts, "err", err) + } + if err == nil && boff.Err() != nil { + err = fmt.Errorf("joining memberlist: %w", boff.Err()) } - boff := backoff.New(ctx, cfg) - var lastErr error + return successfullyJoined, err +} - for boff.Ongoing() { - // We rejoin all nodes, including those that were joined during "fast-join". - // This is harmless and simpler. - nodes := m.discoverMembers(ctx, m.cfg.JoinMembers) +// joinMembersInBatches joins m.cfg.JoinMembers and re-resolves the address of m.cfg.JoinMembers after joining 100 nodes. +// joinMembersInBatches returns the number of nodes joined. joinMembersInBatches returns an error only when the +// number of joined nodes is 0. +func (m *KV) joinMembersInBatches(ctx context.Context) (int, error) { + const batchSize = 100 + var ( + attemptedNodes = make(map[string]bool) + successfullyJoined = 0 + lastErr error + batch = make([]string, batchSize) + nodes []string + ) + for moreAvailableNodes := true; ctx.Err() == nil && moreAvailableNodes; { + // Rediscover nodes and try to join a subset of them with each batch. + // When the list of nodes is large by the time we reach the end of the list some of the + // IPs can be unreachable. + newlyResolved := m.discoverMembers(ctx, m.cfg.JoinMembers) + if len(newlyResolved) > 0 { + // If the resolution fails we keep using the nodes list from the last resolution. 
+ // If that failed too, then we fail the join attempt. + nodes = newlyResolved + } - if len(nodes) > 0 { - reached, err := m.memberlist.Join(nodes) // err is only returned if reached==0. - if err == nil { - level.Info(m.logger).Log("msg", "joining memberlist cluster succeeded", "reached_nodes", reached, "elapsed_time", time.Since(startTime)) - return true + // Prepare batch + batch = batch[:0] + moreAvailableNodes = false + for _, n := range nodes { + if attemptedNodes[n] { + continue } - level.Warn(m.logger).Log("msg", "joining memberlist cluster: failed to reach any nodes", "retries", boff.NumRetries(), "err", err) - lastErr = err - } else { - level.Warn(m.logger).Log("msg", "joining memberlist cluster: found no nodes to join", "retries", boff.NumRetries()) + if len(batch) >= batchSize { + moreAvailableNodes = true + break + } + batch = append(batch, n) + attemptedNodes[n] = true } - boff.Wait() + // Join batch + joinedInBatch, err := m.joinMembersBatch(ctx, batch) + if err != nil { + lastErr = err + } + successfullyJoined += joinedInBatch + } + if successfullyJoined > 0 { + return successfullyJoined, nil + } + if successfullyJoined == 0 && lastErr == nil { + return 0, errors.New("found no nodes to join") } + return 0, lastErr +} - level.Error(m.logger).Log("msg", "joining memberlist cluster failed", "last_error", lastErr, "elapsed_time", time.Since(startTime)) - return false +// joinMembersBatch returns an error only if it couldn't successfully join any nodes or if ctx is cancelled. +func (m *KV) joinMembersBatch(ctx context.Context, nodes []string) (successfullyJoined int, lastErr error) { + for nodeIdx := range nodes { + if ctx.Err() != nil { + return successfullyJoined, fmt.Errorf("joining batch: %w", context.Cause(ctx)) + } + // Attempt to join a single node. + // The cost of calling Join shouldn't be different between passing all nodes in one invocation versus passing a single node per invocation. + reached, err := m.memberlist.Join(nodes[nodeIdx : nodeIdx+1]) + successfullyJoined += reached + if err != nil { + lastErr = err + } + } + if successfullyJoined > 0 { + lastErr = nil + } + return successfullyJoined, lastErr } // Provides a dns-based member disovery to join a memberlist cluster w/o knowning members' addresses upfront. @@ -693,7 +772,7 @@ func (m *KV) Get(key string, codec codec.Codec) (interface{}, error) { } // Returns current value with removed tombstones. -func (m *KV) get(key string, codec codec.Codec) (out interface{}, version uint, err error) { +func (m *KV) get(key string, _ codec.Codec) (out interface{}, version uint, err error) { m.storeMu.Lock() v := m.store[key].Clone() m.storeMu.Unlock() @@ -983,7 +1062,7 @@ func (m *KV) broadcastNewValue(key string, change Mergeable, version uint, codec } // NodeMeta is method from Memberlist Delegate interface -func (m *KV) NodeMeta(limit int) []byte { +func (m *KV) NodeMeta(_ int) []byte { // we can send local state from here (512 bytes only) // if state is updated, we need to tell memberlist to distribute it. return nil @@ -1117,7 +1196,7 @@ func (m *KV) GetBroadcasts(overhead, limit int) [][]byte { // This is "pull" part of push/pull sync (either periodic, or when new node joins the cluster). // Here we dump our entire state -- all keys and their values. There is no limit on message size here, // as Memberlist uses 'stream' operations for transferring this state. 
-func (m *KV) LocalState(join bool) []byte { +func (m *KV) LocalState(_ bool) []byte { if !m.delegateReady.Load() { return nil } @@ -1186,12 +1265,12 @@ func (m *KV) LocalState(join bool) []byte { return buf.Bytes() } -// MergeRemoteState is method from Memberlist Delegate interface +// MergeRemoteState is a method from the Memberlist Delegate interface. // // This is 'push' part of push/pull sync. We merge incoming KV store (all keys and values) with ours. // // Data is full state of remote KV store, as generated by LocalState method (run on another node). -func (m *KV) MergeRemoteState(data []byte, join bool) { +func (m *KV) MergeRemoteState(data []byte, _ bool) { if !m.delegateReady.Load() { return } diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/metrics.go b/vendor/github.com/grafana/dskit/kv/memberlist/metrics.go index 9ab56a66..75a6b232 100644 --- a/vendor/github.com/grafana/dskit/kv/memberlist/metrics.go +++ b/vendor/github.com/grafana/dskit/kv/memberlist/metrics.go @@ -171,17 +171,25 @@ func (m *KV) createAndRegisterMetrics() { Help: "Number of dropped notifications in WatchPrefix function", }, []string{"prefix"}) - if m.cfg.MetricsRegisterer == nil { + if m.registerer == nil { return } + m.registerer.MustRegister(m) + // memberlist uses armonmetrics package for internal usage - // here we configure armonmetrics to use prometheus - sink, err := armonprometheus.NewPrometheusSink() // there is no option to pass registrerer, this uses default + // here we configure armonmetrics to use prometheus. + opts := armonprometheus.PrometheusOpts{ + Expiration: 0, // Don't expire metrics. + Registerer: m.registerer, + } + + sink, err := armonprometheus.NewPrometheusSinkFrom(opts) if err == nil { cfg := armonmetrics.DefaultConfig("") cfg.EnableHostname = false // no need to put hostname into metric cfg.EnableHostnameLabel = false // no need to put hostname into labels + cfg.EnableServiceLabel = false // Don't put service name into a label. (We don't set service name anyway). cfg.EnableRuntimeMetrics = false // metrics about Go runtime already provided by prometheus cfg.EnableTypePrefix = true // to make better sense of internal memberlist metrics cfg.TimerGranularity = time.Second // timers are in seconds in prometheus world diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/tcp_transport.go b/vendor/github.com/grafana/dskit/kv/memberlist/tcp_transport.go index f0b8846f..751ad116 100644 --- a/vendor/github.com/grafana/dskit/kv/memberlist/tcp_transport.go +++ b/vendor/github.com/grafana/dskit/kv/memberlist/tcp_transport.go @@ -14,7 +14,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/hashicorp/go-sockaddr" + sockaddr "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/memberlist" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -23,6 +23,7 @@ import ( dstls "github.com/grafana/dskit/crypto/tls" "github.com/grafana/dskit/flagext" + "github.com/grafana/dskit/netutil" ) type messageType uint8 @@ -34,10 +35,11 @@ const ( ) const zeroZeroZeroZero = "0.0.0.0" +const colonColon = "::" // TCPTransportConfig is a configuration structure for creating new TCPTransport. type TCPTransportConfig struct { - // BindAddrs is a list of addresses to bind to. + // BindAddrs is a list of IP addresses to bind to. BindAddrs flagext.StringSlice `yaml:"bind_addr"` // BindPort is the port to listen on, for each address above. 
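// Binding the memberlist transport to IPv6: with BindAddrs set to "::" the
// advertised address is picked via netutil.GetFirstAddressOf, as the
// FinalAdvertiseAddr change below shows. The registerer and port are
// illustrative; the constructor now takes the registerer directly.
package main

import (
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/grafana/dskit/flagext"
	"github.com/grafana/dskit/kv/memberlist"
)

func main() {
	cfg := memberlist.TCPTransportConfig{
		BindAddrs: flagext.StringSlice{"::"},
		BindPort:  7946,
	}
	tr, err := memberlist.NewTCPTransport(cfg, log.NewNopLogger(), prometheus.NewRegistry())
	if err != nil {
		panic(err)
	}
	defer func() { _ = tr.Shutdown() }()
}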
@@ -54,8 +56,7 @@ type TCPTransportConfig struct { TransportDebug bool `yaml:"-" category:"advanced"` // Where to put custom metrics. nil = don't register. - MetricsRegisterer prometheus.Registerer `yaml:"-"` - MetricsNamespace string `yaml:"-"` + MetricsNamespace string `yaml:"-"` TLSEnabled bool `yaml:"tls_enabled" category:"advanced"` TLS dstls.ClientConfig `yaml:",inline"` @@ -111,7 +112,7 @@ type TCPTransport struct { // NewTCPTransport returns a new tcp-based transport with the given configuration. On // success all the network listeners will be created and listening. -func NewTCPTransport(config TCPTransportConfig, logger log.Logger) (*TCPTransport, error) { +func NewTCPTransport(config TCPTransportConfig, logger log.Logger, registerer prometheus.Registerer) (*TCPTransport, error) { if len(config.BindAddrs) == 0 { config.BindAddrs = []string{zeroZeroZeroZero} } @@ -133,7 +134,7 @@ func NewTCPTransport(config TCPTransportConfig, logger log.Logger) (*TCPTranspor } } - t.registerMetrics(config.MetricsRegisterer) + t.registerMetrics(registerer) // Clean up listeners if there's an error. defer func() { @@ -146,6 +147,9 @@ func NewTCPTransport(config TCPTransportConfig, logger log.Logger) (*TCPTranspor port := config.BindPort for _, addr := range config.BindAddrs { ip := net.ParseIP(addr) + if ip == nil { + return nil, fmt.Errorf("could not parse bind addr %q as IP address", addr) + } tcpAddr := &net.TCPAddr{IP: ip, Port: port} @@ -358,13 +362,11 @@ func (t *TCPTransport) FinalAdvertiseAddr(ip string, port int) (net.IP, int, err return nil, 0, fmt.Errorf("failed to parse advertise address %q", ip) } - // Ensure IPv4 conversion if necessary. - if ip4 := advertiseAddr.To4(); ip4 != nil { - advertiseAddr = ip4 - } advertisePort = port } else { - if t.cfg.BindAddrs[0] == zeroZeroZeroZero { + + switch t.cfg.BindAddrs[0] { + case zeroZeroZeroZero: // Otherwise, if we're not bound to a specific IP, let's // use a suitable private IP address. var err error @@ -378,9 +380,19 @@ func (t *TCPTransport) FinalAdvertiseAddr(ip string, port int) (net.IP, int, err advertiseAddr = net.ParseIP(ip) if advertiseAddr == nil { - return nil, 0, fmt.Errorf("failed to parse advertise address: %q", ip) + return nil, 0, fmt.Errorf("failed to parse advertise address %q", ip) } - } else { + case colonColon: + inet6Ip, err := netutil.GetFirstAddressOf(nil, t.logger, true) + if err != nil { + return nil, 0, fmt.Errorf("failed to get private inet6 address: %w", err) + } + + advertiseAddr = net.ParseIP(inet6Ip) + if advertiseAddr == nil { + return nil, 0, fmt.Errorf("failed to parse inet6 advertise address %q", ip) + } + default: // Use the IP that we're bound to, based on the first // TCP listener, which we already ensure is there. advertiseAddr = t.tcpListeners[0].Addr().(*net.TCPAddr).IP diff --git a/vendor/github.com/grafana/dskit/kv/metrics.go b/vendor/github.com/grafana/dskit/kv/metrics.go index 66fe9fa9..7361b8c4 100644 --- a/vendor/github.com/grafana/dskit/kv/metrics.go +++ b/vendor/github.com/grafana/dskit/kv/metrics.go @@ -6,8 +6,9 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/instrument" + + "github.com/grafana/dskit/httpgrpc" + "github.com/grafana/dskit/instrument" ) // RegistererWithKVName wraps the provided Registerer with the KV name label. 
If a nil reg diff --git a/vendor/github.com/grafana/dskit/kv/mock.go b/vendor/github.com/grafana/dskit/kv/mock.go index 5b446d85..59d74306 100644 --- a/vendor/github.com/grafana/dskit/kv/mock.go +++ b/vendor/github.com/grafana/dskit/kv/mock.go @@ -16,24 +16,24 @@ func buildMockClient(logger log.Logger) (Client, error) { return mockClient{}, nil } -func (m mockClient) List(ctx context.Context, prefix string) ([]string, error) { +func (m mockClient) List(_ context.Context, _ string) ([]string, error) { return []string{}, nil } -func (m mockClient) Get(ctx context.Context, key string) (interface{}, error) { +func (m mockClient) Get(_ context.Context, _ string) (interface{}, error) { return "", nil } -func (m mockClient) Delete(ctx context.Context, key string) error { +func (m mockClient) Delete(_ context.Context, _ string) error { return nil } -func (m mockClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (m mockClient) CAS(_ context.Context, _ string, _ func(in interface{}) (out interface{}, retry bool, err error)) error { return nil } -func (m mockClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { +func (m mockClient) WatchKey(_ context.Context, _ string, _ func(interface{}) bool) { } -func (m mockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { +func (m mockClient) WatchPrefix(_ context.Context, _ string, _ func(string, interface{}) bool) { } diff --git a/vendor/github.com/grafana/loki/pkg/util/log/line_buffer.go b/vendor/github.com/grafana/dskit/log/buffered.go similarity index 63% rename from vendor/github.com/grafana/loki/pkg/util/log/line_buffer.go rename to vendor/github.com/grafana/dskit/log/buffered.go index 63051840..301ea8e6 100644 --- a/vendor/github.com/grafana/loki/pkg/util/log/line_buffer.go +++ b/vendor/github.com/grafana/dskit/log/buffered.go @@ -1,3 +1,6 @@ +// Provenance-includes-location: https://github.com/grafana/loki/blob/7c78d7ea44afb420847255f9f5a4f677ad0f47bf/pkg/util/log/line_buffer.go +// Provenance-includes-location: https://github.com/grafana/mimir/blob/c8b24a462f7e224950409e7e0a4e0a58f3a79599/pkg/util/log/line_buffer.go +// Provenance-includes-copyright: Grafana Labs package log import ( @@ -9,9 +12,9 @@ import ( "go.uber.org/atomic" ) -// LineBufferedLogger buffers log lines to be flushed periodically. Without a line buffer, Log() will call the write +// BufferedLogger buffers log lines to be flushed periodically. Without a line buffer, Log() will call the write // syscall for every log line which is expensive if logging thousands of lines per second. -type LineBufferedLogger struct { +type BufferedLogger struct { buf *threadsafeBuffer entries atomic.Uint32 cap uint32 @@ -21,13 +24,13 @@ type LineBufferedLogger struct { } // Size returns the number of entries in the buffer. -func (l *LineBufferedLogger) Size() uint32 { +func (l *BufferedLogger) Size() uint32 { return l.entries.Load() } // Write writes the given bytes to the line buffer, and increments the entries counter. // If the buffer is full (entries == cap), it will be flushed, and the entries counter reset. 
-func (l *LineBufferedLogger) Write(p []byte) (n int, err error) { +func (l *BufferedLogger) Write(p []byte) (n int, err error) { // when we've filled the buffer, flush it if l.Size() >= l.cap { // Flush resets the size to 0 @@ -43,15 +46,13 @@ func (l *LineBufferedLogger) Write(p []byte) (n int, err error) { } // Flush forces the buffer to be written to the underlying writer. -func (l *LineBufferedLogger) Flush() error { - sz := l.Size() +func (l *BufferedLogger) Flush() error { + // reset the counter + sz := l.entries.Swap(0) if sz <= 0 { return nil } - // reset the counter - l.entries.Store(0) - // WriteTo() calls Reset() on the underlying buffer, so it's not needed here _, err := l.buf.WriteTo(l.w) @@ -63,11 +64,11 @@ func (l *LineBufferedLogger) Flush() error { return err } -type LineBufferedLoggerOption func(*LineBufferedLogger) +type BufferedLoggerOption func(*BufferedLogger) -// WithFlushPeriod creates a new LineBufferedLoggerOption that sets the flush period for the LineBufferedLogger. -func WithFlushPeriod(d time.Duration) LineBufferedLoggerOption { - return func(l *LineBufferedLogger) { +// WithFlushPeriod creates a new BufferedLoggerOption that sets the flush period for the BufferedLogger. +func WithFlushPeriod(d time.Duration) BufferedLoggerOption { + return func(l *BufferedLogger) { go func() { tick := time.NewTicker(d) defer tick.Stop() @@ -81,23 +82,23 @@ func WithFlushPeriod(d time.Duration) LineBufferedLoggerOption { // WithFlushCallback allows for a callback function to be executed when Flush() is called. // The length of the buffer at the time of the Flush() will be passed to the function. -func WithFlushCallback(fn func(entries uint32)) LineBufferedLoggerOption { - return func(l *LineBufferedLogger) { +func WithFlushCallback(fn func(entries uint32)) BufferedLoggerOption { + return func(l *BufferedLogger) { l.onFlush = fn } } // WithPrellocatedBuffer preallocates a buffer to reduce GC cycles and slice resizing. -func WithPrellocatedBuffer(size uint32) LineBufferedLoggerOption { - return func(l *LineBufferedLogger) { +func WithPrellocatedBuffer(size uint32) BufferedLoggerOption { + return func(l *BufferedLogger) { l.buf = newThreadsafeBuffer(bytes.NewBuffer(make([]byte, 0, size))) } } -// NewLineBufferedLogger creates a new LineBufferedLogger with a configured capacity. +// NewBufferedLogger creates a new BufferedLogger with a configured capacity. // Lines are flushed when the context is done, the buffer is full, or the flush period is reached. -func NewLineBufferedLogger(w io.Writer, cap uint32, opts ...LineBufferedLoggerOption) *LineBufferedLogger { - l := &LineBufferedLogger{ +func NewBufferedLogger(w io.Writer, cap uint32, opts ...BufferedLoggerOption) *BufferedLogger { + l := &BufferedLogger{ w: w, buf: newThreadsafeBuffer(bytes.NewBuffer([]byte{})), cap: cap, @@ -112,31 +113,30 @@ func NewLineBufferedLogger(w io.Writer, cap uint32, opts ...LineBufferedLoggerOp // threadsafeBuffer wraps the non-threadsafe bytes.Buffer. type threadsafeBuffer struct { - sync.RWMutex - + mx sync.Mutex buf *bytes.Buffer } -// Read returns the contents of the buffer. +// Read reads up to len(p) bytes into p. It returns the number of bytes read (0 <= n <= len(p)) and any error encountered. func (t *threadsafeBuffer) Read(p []byte) (n int, err error) { - t.RLock() - defer t.RUnlock() + t.mx.Lock() + defer t.mx.Unlock() return t.buf.Read(p) } // Write writes the given bytes to the underlying writer. 
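A minimal usage sketch of the renamed BufferedLogger, using only names from the hunks above; the writer, capacity, and flush period chosen here are illustrative assumptions, not part of this diff:

package main

import (
	"os"
	"time"

	gokitlog "github.com/go-kit/log"

	dslog "github.com/grafana/dskit/log"
)

func main() {
	// Buffer up to 1024 log lines; flush when the buffer fills or once per second.
	buf := dslog.NewBufferedLogger(os.Stderr, 1024,
		dslog.WithFlushPeriod(time.Second),
		dslog.WithPrellocatedBuffer(64*1024), // upstream spelling of the option
	)
	logger := gokitlog.NewLogfmtLogger(buf)
	_ = logger.Log("msg", "hello buffered world")
	_ = buf.Flush() // drain any remaining lines before exit
}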
func (t *threadsafeBuffer) Write(p []byte) (n int, err error) { - t.Lock() - defer t.Unlock() + t.mx.Lock() + defer t.mx.Unlock() return t.buf.Write(p) } // WriteTo writes the buffered lines to the given writer. func (t *threadsafeBuffer) WriteTo(w io.Writer) (n int64, err error) { - t.Lock() - defer t.Unlock() + t.mx.Lock() + defer t.mx.Unlock() return t.buf.WriteTo(w) } @@ -145,8 +145,8 @@ func (t *threadsafeBuffer) WriteTo(w io.Writer) (n int64, err error) { // but it retains the underlying storage for use by future writes. // Reset is the same as Truncate(0). func (t *threadsafeBuffer) Reset() { - t.Lock() - defer t.Unlock() + t.mx.Lock() + defer t.mx.Unlock() t.buf.Reset() } diff --git a/vendor/github.com/grafana/dskit/log/global.go b/vendor/github.com/grafana/dskit/log/global.go new file mode 100644 index 00000000..e32decd6 --- /dev/null +++ b/vendor/github.com/grafana/dskit/log/global.go @@ -0,0 +1,21 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/logging/global.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + +package log + +import ( + "github.com/go-kit/log" +) + +var global = log.NewNopLogger() + +// Global returns the global logger. +func Global() log.Logger { + return global +} + +// SetGlobal sets the global logger. +func SetGlobal(logger log.Logger) { + global = logger +} diff --git a/vendor/github.com/grafana/dskit/log/gokit.go b/vendor/github.com/grafana/dskit/log/gokit.go new file mode 100644 index 00000000..3a571dac --- /dev/null +++ b/vendor/github.com/grafana/dskit/log/gokit.go @@ -0,0 +1,73 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/logging/gokit.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + +package log + +import ( + "fmt" + "io" + "os" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" +) + +const ( + LogfmtFormat = "logfmt" + JSONFormat = "json" +) + +// NewGoKit creates a new GoKit logger with the given format. +// If the given format is empty or unknown, logfmt is used. +// No additional fields nor filters are added to the created logger, and +// if they are required, the caller is expected to add them. +func NewGoKit(format string) log.Logger { + writer := log.NewSyncWriter(os.Stderr) + return newGoKit(format, writer) +} + +// NewGoKitWithLevel creates a new GoKit logger with the given level and format. +// If the given format is empty or unknown, logfmt is used. +func NewGoKitWithLevel(lvl Level, format string) log.Logger { + logger := NewGoKit(format) + return level.NewFilter(logger, lvl.Option) +} + +// NewGoKitWithWriter creates a new GoKit logger with the given format and writer. +// The input writer must be provided, must be thread-safe, and the caller is +// expected to guarantee these requirements. +// If the given format is empty or unknown, logfmt is used. +// No additional fields nor filters are added to the created logger, and +// if they are required, the caller is expected to add them. 
+func NewGoKitWithWriter(format string, writer io.Writer) log.Logger { + return newGoKit(format, writer) +} + +func newGoKit(format string, writer io.Writer) log.Logger { + if format == JSONFormat { + return log.NewJSONLogger(writer) + } + return log.NewLogfmtLogger(writer) +} + +// stand-alone for test purposes +func addStandardFields(logger log.Logger) log.Logger { + return log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.Caller(5)) +} + +type Sprintf struct { + format string + args []interface{} +} + +func LazySprintf(format string, args ...interface{}) *Sprintf { + return &Sprintf{ + format: format, + args: args, + } +} + +func (s *Sprintf) String() string { + return fmt.Sprintf(s.format, s.args...) +} diff --git a/vendor/github.com/weaveworks/common/logging/level.go b/vendor/github.com/grafana/dskit/log/level.go similarity index 81% rename from vendor/github.com/weaveworks/common/logging/level.go rename to vendor/github.com/grafana/dskit/log/level.go index d7b92eaa..c2680c41 100644 --- a/vendor/github.com/weaveworks/common/logging/level.go +++ b/vendor/github.com/grafana/dskit/log/level.go @@ -1,4 +1,8 @@ -package logging +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/logging/level.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + +package log // Copy-pasted from prometheus/common/promlog. // Copyright 2017 The Prometheus Authors @@ -19,20 +23,17 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) -// Level is a settable identifier for the minimum level a log entry -// must be have. +// Level is a settable identifier for the minimum level a log entry must have. type Level struct { s string - Logrus logrus.Level - Gokit level.Option + Option level.Option } // RegisterFlags adds the log level flag to the provided flagset. func (l *Level) RegisterFlags(f *flag.FlagSet) { - l.Set("info") + _ = l.Set("info") f.Var(l, "log.level", "Only log messages with the given severity or above. 
Valid levels: [debug, info, warn, error]") } @@ -58,17 +59,13 @@ func (l Level) MarshalYAML() (interface{}, error) { func (l *Level) Set(s string) error { switch s { case "debug": - l.Logrus = logrus.DebugLevel - l.Gokit = level.AllowDebug() + l.Option = level.AllowDebug() case "info": - l.Logrus = logrus.InfoLevel - l.Gokit = level.AllowInfo() + l.Option = level.AllowInfo() case "warn": - l.Logrus = logrus.WarnLevel - l.Gokit = level.AllowWarn() + l.Option = level.AllowWarn() case "error": - l.Logrus = logrus.ErrorLevel - l.Gokit = level.AllowError() + l.Option = level.AllowError() default: return errors.Errorf("unrecognized log level %q", s) } diff --git a/vendor/github.com/grafana/dskit/log/ratelimit.go b/vendor/github.com/grafana/dskit/log/ratelimit.go new file mode 100644 index 00000000..3e831507 --- /dev/null +++ b/vendor/github.com/grafana/dskit/log/ratelimit.go @@ -0,0 +1,70 @@ +package log + +import ( + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "golang.org/x/time/rate" +) + +type RateLimitedLogger struct { + next log.Logger + limiter *rate.Limiter + + discardedInfoLogLinesCounter prometheus.Counter + discardedDebugLogLinesCounter prometheus.Counter + discardedWarnLogLinesCounter prometheus.Counter + discardedErrorLogLinesCounter prometheus.Counter +} + +// NewRateLimitedLogger returns a log.Logger that is limited to the given number of logs per second, +// with the given burst size. +func NewRateLimitedLogger(logger log.Logger, logsPerSecond float64, logsBurstSize int, registry prometheus.Registerer) log.Logger { + discardedLogLinesCounter := promauto.With(registry).NewCounterVec(prometheus.CounterOpts{ + Name: "logger_rate_limit_discarded_log_lines_total", + Help: "Total number of discarded log lines per level.", + }, []string{"level"}) + + return &RateLimitedLogger{ + next: logger, + limiter: rate.NewLimiter(rate.Limit(logsPerSecond), logsBurstSize), + discardedInfoLogLinesCounter: discardedLogLinesCounter.WithLabelValues(level.InfoValue().String()), + discardedDebugLogLinesCounter: discardedLogLinesCounter.WithLabelValues(level.DebugValue().String()), + discardedWarnLogLinesCounter: discardedLogLinesCounter.WithLabelValues(level.WarnValue().String()), + discardedErrorLogLinesCounter: discardedLogLinesCounter.WithLabelValues(level.ErrorValue().String()), + } +} + +func (l *RateLimitedLogger) Log(keyvals ...interface{}) error { + if l.limiter.Allow() { + return l.next.Log(keyvals...) + } + counter := l.getCounterFromKeyvals(keyvals...) 
+ if counter != nil { + counter.Inc() + } + return nil +} + +func (l *RateLimitedLogger) getCounterFromKeyvals(keyvals ...interface{}) prometheus.Counter { + for i := 0; i < len(keyvals); i += 2 { + levelKey, ok := keyvals[i].(string) + if ok && levelKey == "level" && i+1 < len(keyvals) { + levelValue := keyvals[i+1] + switch levelValue { + case level.InfoValue(): + return l.discardedInfoLogLinesCounter + case level.DebugValue(): + return l.discardedDebugLogLinesCounter + case level.WarnValue(): + return l.discardedWarnLogLinesCounter + case level.ErrorValue(): + return l.discardedErrorLogLinesCounter + default: + return nil + } + } + } + return nil +} diff --git a/vendor/github.com/grafana/dskit/loser/loser.go b/vendor/github.com/grafana/dskit/loser/loser.go new file mode 100644 index 00000000..b02e29f6 --- /dev/null +++ b/vendor/github.com/grafana/dskit/loser/loser.go @@ -0,0 +1,175 @@ +// Loser tree, from https://en.wikipedia.org/wiki/K-way_merge_algorithm#Tournament_Tree + +package loser + +import "golang.org/x/exp/constraints" + +func New[E constraints.Ordered](lists [][]E, maxVal E) *Tree[E] { + nLists := len(lists) + t := Tree[E]{ + maxVal: maxVal, + nodes: make([]node[E], nLists*2), + } + for i, s := range lists { + t.nodes[i+nLists].items = s + t.moveNext(i + nLists) // Must call Next on each item so that At() has a value. + } + if nLists > 0 { + t.nodes[0].index = -1 // flag to be initialized on first call to Next(). + } + return &t +} + +// A loser tree is a binary tree laid out such that nodes N and N+1 have parent N/2. +// We store M leaf nodes in positions M...2M-1, and M-1 internal nodes in positions 1..M-1. +// Node 0 is a special node, containing the winner of the contest. +type Tree[E constraints.Ordered] struct { + maxVal E + nodes []node[E] +} + +type node[E constraints.Ordered] struct { + index int // This is the loser for all nodes except the 0th, where it is the winner. + value E // Value copied from the loser node, or winner for node 0. + items []E // Only populated for leaf nodes. +} + +func (t *Tree[E]) moveNext(index int) bool { + n := &t.nodes[index] + if len(n.items) > 0 { + n.value = n.items[0] + n.items = n.items[1:] + return true + } + n.value = t.maxVal + n.index = -1 + return false +} + +func (t *Tree[E]) Winner() E { + return t.nodes[t.nodes[0].index].value +} + +func (t *Tree[E]) Next() bool { + if len(t.nodes) == 0 { + return false + } + if t.nodes[0].index == -1 { // If tree has not been initialized yet, do that. + t.initialize() + return t.nodes[t.nodes[0].index].index != -1 + } + if t.nodes[t.nodes[0].index].index == -1 { // already exhausted + return false + } + if t.moveNext(t.nodes[0].index) { + t.replayGames(t.nodes[0].index) + } else { + t.sequenceEnded(t.nodes[0].index) + } + return t.nodes[t.nodes[0].index].index != -1 +} + +func (t *Tree[E]) initialize() { + winners := make([]int, len(t.nodes)) + // Initialize leaf nodes as winners to start. + for i := len(t.nodes) / 2; i < len(t.nodes); i++ { + winners[i] = i + } + for i := len(t.nodes) - 2; i > 0; i -= 2 { + // At each stage the winners play each other, and we record the loser in the node. + loser, winner := t.playGame(winners[i], winners[i+1]) + p := parent(i) + t.nodes[p].index = loser + t.nodes[p].value = t.nodes[loser].value + winners[p] = winner + } + t.nodes[0].index = winners[1] + t.nodes[0].value = t.nodes[winners[1]].value +} + +// Starting at pos, which is a winner, re-consider all values up to the root. 
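A small sketch of driving the loser tree defined in this file for a k-way merge; the int64 lists and the sentinel value are illustrative assumptions:

package main

import (
	"fmt"
	"math"

	"github.com/grafana/dskit/loser"
)

func main() {
	// Three pre-sorted lists; maxVal must sort after every real element.
	lists := [][]int64{{1, 4, 7}, {2, 5, 8}, {3, 6, 9}}
	tree := loser.New(lists, int64(math.MaxInt64))
	for tree.Next() {
		fmt.Println(tree.Winner()) // emits 1 through 9 in ascending order
	}
}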
+func (t *Tree[E]) replayGames(pos int) { + // At the start, pos is a leaf node, and is the winner at that level. + n := parent(pos) + for n != 0 { + // If n.value < pos.value then pos loses. + // If they are equal, pos wins because n could be a sequence that ended, with value maxval. + if t.nodes[n].value < t.nodes[pos].value { + loser := pos + // Record pos as the loser here, and the old loser is the new winner. + pos = t.nodes[n].index + t.nodes[n].index = loser + t.nodes[n].value = t.nodes[loser].value + } + n = parent(n) + } + // pos is now the winner; store it in node 0. + t.nodes[0].index = pos + t.nodes[0].value = t.nodes[pos].value +} + +func (t *Tree[E]) sequenceEnded(pos int) { + // Find the first active sequence which used to lose to it. + n := parent(pos) + for n != 0 && t.nodes[t.nodes[n].index].index == -1 { + n = parent(n) + } + if n == 0 { + // There are no active sequences left + t.nodes[0].index = pos + t.nodes[0].value = t.maxVal + return + } + + // Record pos as the loser here, and the old loser is the new winner. + loser := pos + winner := t.nodes[n].index + t.nodes[n].index = loser + t.nodes[n].value = t.nodes[loser].value + t.replayGames(winner) +} + +func (t *Tree[E]) playGame(a, b int) (loser, winner int) { + if t.nodes[a].value < t.nodes[b].value { + return b, a + } + return a, b +} + +func parent(i int) int { return i / 2 } + +// Add a new list to the merge set +func (t *Tree[E]) Push(list []E) { + // First, see if we can replace one that was previously finished. + for newPos := len(t.nodes) / 2; newPos < len(t.nodes); newPos++ { + if t.nodes[newPos].index == -1 { + t.nodes[newPos].index = newPos + t.nodes[newPos].items = list + t.moveNext(newPos) + t.nodes[0].index = -1 // flag for re-initialize on next call to Next() + return + } + } + // We need to expand the tree. Pick the next biggest power of 2 to amortise resizing cost. + size := 1 + for size <= len(t.nodes)/2 { + size *= 2 + } + newPos := size + len(t.nodes)/2 + newNodes := make([]node[E], size*2) + // Copy data over and fix up the indexes. + for i, n := range t.nodes[len(t.nodes)/2:] { + newNodes[i+size] = n + newNodes[i+size].index = i + size + } + t.nodes = newNodes + t.nodes[newPos].index = newPos + t.nodes[newPos].items = list + // Mark all the empty nodes we have added as finished. + for i := newPos + 1; i < len(t.nodes); i++ { + t.nodes[i].index = -1 + t.nodes[i].value = t.maxVal + } + t.moveNext(newPos) + t.nodes[0].index = -1 // flag for re-initialize on next call to Next() +} diff --git a/vendor/github.com/weaveworks/common/middleware/counting_listener.go b/vendor/github.com/grafana/dskit/middleware/counting_listener.go similarity index 82% rename from vendor/github.com/weaveworks/common/middleware/counting_listener.go rename to vendor/github.com/grafana/dskit/middleware/counting_listener.go index 1e852628..961f71a5 100644 --- a/vendor/github.com/weaveworks/common/middleware/counting_listener.go +++ b/vendor/github.com/grafana/dskit/middleware/counting_listener.go @@ -1,3 +1,7 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/counting_listener.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. 
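A hedged sketch of the RateLimitedLogger introduced earlier in this diff; the rate, burst size, and registry here are illustrative assumptions:

package main

import (
	"os"

	gokitlog "github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"

	dslog "github.com/grafana/dskit/log"
)

func main() {
	base := gokitlog.NewLogfmtLogger(os.Stderr)
	// At most 5 lines/second with a burst of 10; dropped lines are counted
	// per level in logger_rate_limit_discarded_log_lines_total.
	limited := dslog.NewRateLimitedLogger(base, 5, 10, prometheus.NewRegistry())
	for i := 0; i < 100; i++ {
		level.Info(limited).Log("msg", "noisy event", "iteration", i)
	}
}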
+ package middleware import ( diff --git a/vendor/github.com/weaveworks/common/middleware/errorhandler.go b/vendor/github.com/grafana/dskit/middleware/errorhandler.go similarity index 86% rename from vendor/github.com/weaveworks/common/middleware/errorhandler.go rename to vendor/github.com/grafana/dskit/middleware/errorhandler.go index 0f7ab614..4a48c90c 100644 --- a/vendor/github.com/weaveworks/common/middleware/errorhandler.go +++ b/vendor/github.com/grafana/dskit/middleware/errorhandler.go @@ -1,3 +1,7 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/errorhandler.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + package middleware import ( @@ -51,6 +55,11 @@ func newErrorInterceptor(w http.ResponseWriter, code int) *errorInterceptor { return &i } +// Unwrap method is used by http.ResponseController to get access to original http.ResponseWriter. +func (i *errorInterceptor) Unwrap() http.ResponseWriter { + return i.originalWriter +} + // Header implements http.ResponseWriter func (i *errorInterceptor) Header() http.Header { return i.headers diff --git a/vendor/github.com/weaveworks/common/middleware/grpc_auth.go b/vendor/github.com/grafana/dskit/middleware/grpc_auth.go similarity index 85% rename from vendor/github.com/weaveworks/common/middleware/grpc_auth.go rename to vendor/github.com/grafana/dskit/middleware/grpc_auth.go index 10be1f8d..156ddaf1 100644 --- a/vendor/github.com/weaveworks/common/middleware/grpc_auth.go +++ b/vendor/github.com/grafana/dskit/middleware/grpc_auth.go @@ -1,10 +1,15 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/grpc_auth.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + package middleware import ( - "golang.org/x/net/context" + "context" + "google.golang.org/grpc" - "github.com/weaveworks/common/user" + "github.com/grafana/dskit/user" ) // ClientUserHeaderInterceptor propagates the user ID from the context to gRPC metadata, which eventually ends up as a HTTP2 header. @@ -29,7 +34,7 @@ func StreamClientUserHeaderInterceptor(ctx context.Context, desc *grpc.StreamDes } // ServerUserHeaderInterceptor propagates the user ID from the gRPC metadata back to our context. -func ServerUserHeaderInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { +func ServerUserHeaderInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { _, ctx, err := user.ExtractFromGRPCRequest(ctx) if err != nil { return nil, err diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go b/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go new file mode 100644 index 00000000..e4052b8e --- /dev/null +++ b/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go @@ -0,0 +1,246 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/grpc_instrumentation.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. 
+ +package middleware + +import ( + "context" + "errors" + "io" + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/atomic" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + + "github.com/grafana/dskit/grpcutil" + "github.com/grafana/dskit/instrument" +) + +func observe(ctx context.Context, hist *prometheus.HistogramVec, method string, err error, duration time.Duration, instrumentLabel instrumentationLabel) { + instrument.ObserveWithExemplar(ctx, hist.WithLabelValues(gRPC, method, instrumentLabel.getInstrumentationLabel(err), "false"), duration.Seconds()) +} + +// UnaryServerInstrumentInterceptor instruments gRPC requests for errors and latency. +func UnaryServerInstrumentInterceptor(hist *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.UnaryServerInterceptor { + instrumentationLabel := applyInstrumentationOptions(false, instrumentationOptions...) + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + begin := time.Now() + resp, err := handler(ctx, req) + observe(ctx, hist, info.FullMethod, err, time.Since(begin), instrumentationLabel) + return resp, err + } +} + +// StreamServerInstrumentInterceptor instruments gRPC requests for errors and latency. +func StreamServerInstrumentInterceptor(hist *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.StreamServerInterceptor { + instrumentationLabel := applyInstrumentationOptions(false, instrumentationOptions...) + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + begin := time.Now() + err := handler(srv, ss) + observe(ss.Context(), hist, info.FullMethod, err, time.Since(begin), instrumentationLabel) + return err + } +} + +// UnaryClientInstrumentInterceptor records duration of gRPC requests client side. +func UnaryClientInstrumentInterceptor(metric *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.UnaryClientInterceptor { + // we enforce masking of HTTP statuses. + instrumentationLabel := applyInstrumentationOptions(true, instrumentationOptions...) + return func(ctx context.Context, method string, req, resp interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + start := time.Now() + err := invoker(ctx, method, req, resp, cc, opts...) + metric.WithLabelValues(method, instrumentationLabel.getInstrumentationLabel(err)).Observe(time.Since(start).Seconds()) + return err + } +} + +// StreamClientInstrumentInterceptor records duration of streaming gRPC requests client side. +func StreamClientInstrumentInterceptor(metric *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.StreamClientInterceptor { + // we enforce masking of HTTP statuses. + instrumentationLabel := applyInstrumentationOptions(true, instrumentationOptions...) + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, + streamer grpc.Streamer, opts ...grpc.CallOption, + ) (grpc.ClientStream, error) { + start := time.Now() + stream, err := streamer(ctx, desc, cc, method, opts...) 
+ s := &instrumentedClientStream{ + metric: metric, + start: start, + method: method, + serverStreams: desc.ServerStreams, + finished: atomic.NewBool(false), + finishedChan: make(chan struct{}), + stream: stream, + instrumentationLabel: instrumentationLabel, + } + s.awaitCompletion(ctx) + return s, err + } +} + +// This implementation is heavily inspired by github.com/opentracing-contrib/go-grpc's openTracingClientStream. +type instrumentedClientStream struct { + metric *prometheus.HistogramVec + start time.Time + method string + serverStreams bool + finished *atomic.Bool + finishedChan chan struct{} + stream grpc.ClientStream + instrumentationLabel instrumentationLabel +} + +func (s *instrumentedClientStream) Trailer() metadata.MD { + return s.stream.Trailer() +} + +func (s *instrumentedClientStream) Context() context.Context { + return s.stream.Context() +} + +func (s *instrumentedClientStream) awaitCompletion(ctx context.Context) { + go func() { + select { + case <-s.finishedChan: + // Stream has finished for another reason, nothing more to do. + case <-ctx.Done(): + s.finish(ctx.Err()) + } + }() +} + +func (s *instrumentedClientStream) finish(err error) { + if !s.finished.CompareAndSwap(false, true) { + return + } + + close(s.finishedChan) + + s.metric.WithLabelValues(s.method, s.instrumentationLabel.getInstrumentationLabel(err)).Observe(time.Since(s.start).Seconds()) +} + +func (s *instrumentedClientStream) SendMsg(m interface{}) error { + err := s.stream.SendMsg(m) + if err == nil || err == io.EOF { + // If SendMsg returns io.EOF, the true error is available from RecvMsg, so we shouldn't consider the stream failed at this point. + return err + } + + s.finish(err) + return err +} + +func (s *instrumentedClientStream) RecvMsg(m interface{}) error { + err := s.stream.RecvMsg(m) + if !s.serverStreams { + // Unary server: this is the only message we'll receive, so the stream has ended. + s.finish(err) + return err + } + + if err == nil { + return nil + } + + if err == io.EOF { + s.finish(nil) + } else { + s.finish(err) + } + + return err +} + +func (s *instrumentedClientStream) Header() (metadata.MD, error) { + md, err := s.stream.Header() + if err != nil { + s.finish(err) + } + return md, err +} + +func (s *instrumentedClientStream) CloseSend() error { + err := s.stream.CloseSend() + if err != nil { + s.finish(err) + } + return err +} + +type InstrumentationOption func(*instrumentationLabel) + +var ( + // ReportGRPCStatusOption is an InstrumentationOption that is used for enabling gRPC status codes to be used + // in instrumentation labels. + ReportGRPCStatusOption InstrumentationOption = func(instrumentationLabel *instrumentationLabel) { + instrumentationLabel.reportGRPCStatus = true + } +) + +func applyInstrumentationOptions(maskHTTPStatuses bool, options ...InstrumentationOption) instrumentationLabel { + instrumentationLabel := instrumentationLabel{maskHTTPStatus: maskHTTPStatuses} + for _, opt := range options { + opt(&instrumentationLabel) + } + return instrumentationLabel +} + +type instrumentationLabel struct { + reportGRPCStatus bool + maskHTTPStatus bool +} + +// getInstrumentationLabel converts an error into an error code string by applying the configurations +// contained in this instrumentationLabel object. 
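Hypothetical wiring for the instrumentation interceptors above. The metric name and the weaveworks-style label order ({method, route, status_code, ws}, matching the WithLabelValues calls in observe) are assumptions, not taken from this diff:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"

	"github.com/grafana/dskit/middleware"
)

func main() {
	hist := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "request_duration_seconds", // assumed metric name
		Help:    "Time (in seconds) spent serving gRPC requests.",
		Buckets: prometheus.DefBuckets,
	}, []string{"method", "route", "status_code", "ws"}) // label order expected by observe()
	prometheus.MustRegister(hist)

	srv := grpc.NewServer(
		grpc.ChainUnaryInterceptor(middleware.UnaryServerInstrumentInterceptor(hist)),
		grpc.ChainStreamInterceptor(middleware.StreamServerInstrumentInterceptor(hist)),
	)
	defer srv.Stop()
}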
+func (i *instrumentationLabel) getInstrumentationLabel(err error) string { + statusCode := errorToStatusCode(err) + return i.statusCodeToString(statusCode) +} + +func (i *instrumentationLabel) statusCodeToString(statusCode codes.Code) string { + if isHTTPStatusCode(statusCode) { + statusFamily := int(statusCode / 100) + if i.maskHTTPStatus { + return strconv.Itoa(statusFamily) + "xx" + } + return strconv.Itoa(int(statusCode)) + } + + if i.reportGRPCStatus { + return statusCode.String() + } + + if statusCode == codes.OK { + if i.maskHTTPStatus { + return "2xx" + } + return "success" + } + + if statusCode == codes.Canceled { + return "cancel" + } + + return "error" +} + +func errorToStatusCode(err error) codes.Code { + if err == nil { + return codes.OK + } + + if errors.Is(err, context.Canceled) { + return codes.Canceled + } + + return grpcutil.ErrorToStatusCode(err) +} + +func isHTTPStatusCode(statusCode codes.Code) bool { + return int(statusCode) >= 100 && int(statusCode) < 600 +} diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_logging.go b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go new file mode 100644 index 00000000..feab3647 --- /dev/null +++ b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go @@ -0,0 +1,93 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/grpc_logging.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + +package middleware + +import ( + "context" + "errors" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + + dskit_log "github.com/grafana/dskit/log" + + "google.golang.org/grpc" + + grpcUtils "github.com/grafana/dskit/grpcutil" + "github.com/grafana/dskit/user" +) + +const ( + gRPC = "gRPC" +) + +// An error can implement ShouldLog() to control whether GRPCServerLog will log. +type OptionalLogging interface { + ShouldLog(ctx context.Context, duration time.Duration) bool +} + +type DoNotLogError struct{ Err error } + +func (i DoNotLogError) Error() string { return i.Err.Error() } +func (i DoNotLogError) Unwrap() error { return i.Err } +func (i DoNotLogError) ShouldLog(_ context.Context, _ time.Duration) bool { return false } + +// GRPCServerLog logs grpc requests, errors, and latency. 
+type GRPCServerLog struct { + Log log.Logger + // WithRequest will log the entire request rather than just the error + WithRequest bool + DisableRequestSuccessLog bool +} + +// UnaryServerInterceptor returns an interceptor that logs gRPC requests +func (s GRPCServerLog) UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + begin := time.Now() + resp, err := handler(ctx, req) + if err == nil && s.DisableRequestSuccessLog { + return resp, nil + } + var optional OptionalLogging + if errors.As(err, &optional) && !optional.ShouldLog(ctx, time.Since(begin)) { + return resp, err + } + + entry := log.With(user.LogWith(ctx, s.Log), "method", info.FullMethod, "duration", time.Since(begin)) + if err != nil { + if s.WithRequest { + entry = log.With(entry, "request", req) + } + if grpcUtils.IsCanceled(err) { + level.Debug(entry).Log("msg", gRPC, "err", err) + } else { + level.Warn(entry).Log("msg", gRPC, "err", err) + } + } else { + level.Debug(entry).Log("msg", dskit_log.LazySprintf("%s (success)", gRPC)) + } + return resp, err +} + +// StreamServerInterceptor returns an interceptor that logs gRPC requests +func (s GRPCServerLog) StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + begin := time.Now() + err := handler(srv, ss) + if err == nil && s.DisableRequestSuccessLog { + return nil + } + + entry := log.With(user.LogWith(ss.Context(), s.Log), "method", info.FullMethod, "duration", time.Since(begin)) + if err != nil { + if grpcUtils.IsCanceled(err) { + level.Debug(entry).Log("msg", gRPC, "err", err) + } else { + level.Warn(entry).Log("msg", gRPC, "err", err) + } + } else { + level.Debug(entry).Log("msg", dskit_log.LazySprintf("%s (success)", gRPC)) + } + return err +} diff --git a/vendor/github.com/weaveworks/common/middleware/grpc_stats.go b/vendor/github.com/grafana/dskit/middleware/grpc_stats.go similarity index 91% rename from vendor/github.com/weaveworks/common/middleware/grpc_stats.go rename to vendor/github.com/grafana/dskit/middleware/grpc_stats.go index 6c2581c4..3d29d9ba 100644 --- a/vendor/github.com/weaveworks/common/middleware/grpc_stats.go +++ b/vendor/github.com/grafana/dskit/middleware/grpc_stats.go @@ -1,3 +1,7 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/grpc_stats.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + package middleware import ( diff --git a/vendor/github.com/weaveworks/common/middleware/header_adder.go b/vendor/github.com/grafana/dskit/middleware/header_adder.go similarity index 71% rename from vendor/github.com/weaveworks/common/middleware/header_adder.go rename to vendor/github.com/grafana/dskit/middleware/header_adder.go index ce7f07df..ffd5cc8d 100644 --- a/vendor/github.com/weaveworks/common/middleware/header_adder.go +++ b/vendor/github.com/grafana/dskit/middleware/header_adder.go @@ -1,3 +1,7 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/header_adder.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. 
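A hedged sketch of attaching the GRPCServerLog above as server interceptors; the logger and field values are illustrative:

package main

import (
	"os"

	gokitlog "github.com/go-kit/log"
	"google.golang.org/grpc"

	"github.com/grafana/dskit/middleware"
)

func main() {
	serverLog := middleware.GRPCServerLog{
		Log:                      gokitlog.NewLogfmtLogger(os.Stderr),
		WithRequest:              false, // avoid logging whole request payloads
		DisableRequestSuccessLog: true,  // only log failures
	}
	srv := grpc.NewServer(
		grpc.ChainUnaryInterceptor(serverLog.UnaryServerInterceptor),
		grpc.ChainStreamInterceptor(serverLog.StreamServerInterceptor),
	)
	defer srv.Stop()
}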
+ package middleware import ( diff --git a/vendor/github.com/weaveworks/common/middleware/http_auth.go b/vendor/github.com/grafana/dskit/middleware/http_auth.go similarity index 66% rename from vendor/github.com/weaveworks/common/middleware/http_auth.go rename to vendor/github.com/grafana/dskit/middleware/http_auth.go index 704d1dab..2b576a92 100644 --- a/vendor/github.com/weaveworks/common/middleware/http_auth.go +++ b/vendor/github.com/grafana/dskit/middleware/http_auth.go @@ -1,9 +1,13 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/http_auth.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + package middleware import ( "net/http" - "github.com/weaveworks/common/user" + "github.com/grafana/dskit/user" ) // AuthenticateUser propagates the user ID from HTTP headers back to the request's context. diff --git a/vendor/github.com/grafana/dskit/middleware/http_tracing.go b/vendor/github.com/grafana/dskit/middleware/http_tracing.go new file mode 100644 index 00000000..989f50fe --- /dev/null +++ b/vendor/github.com/grafana/dskit/middleware/http_tracing.go @@ -0,0 +1,145 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/http_tracing.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + +package middleware + +import ( + "fmt" + "net/http" + + "github.com/gorilla/mux" + "github.com/opentracing-contrib/go-stdlib/nethttp" + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" +) + +// Dummy dependency to enforce that we have a nethttp version newer +// than the one which implements Websockets. (No semver on nethttp) +var _ = nethttp.MWURLTagFunc + +// Tracer is a middleware which traces incoming requests. +type Tracer struct { + RouteMatcher RouteMatcher + SourceIPs *SourceIPExtractor +} + +// Wrap implements Interface +func (t Tracer) Wrap(next http.Handler) http.Handler { + options := []nethttp.MWOption{ + nethttp.OperationNameFunc(makeHTTPOperationNameFunc(t.RouteMatcher)), + nethttp.MWSpanObserver(func(sp opentracing.Span, r *http.Request) { + // add a tag with the client's user agent to the span + userAgent := r.Header.Get("User-Agent") + if userAgent != "" { + sp.SetTag("http.user_agent", userAgent) + } + + // add a tag with the client's sourceIPs to the span, if a + // SourceIPExtractor is given. + if t.SourceIPs != nil { + sp.SetTag("sourceIPs", t.SourceIPs.Get(r)) + } + }), + } + + return nethttp.Middleware(opentracing.GlobalTracer(), next, options...) +} + +// HTTPGRPCTracer is a middleware which traces incoming httpgrpc requests. +type HTTPGRPCTracer struct { + RouteMatcher RouteMatcher +} + +// InitHTTPGRPCMiddleware initializes gorilla/mux-compatible HTTP middleware +// +// HTTPGRPCTracer is specific to the server-side handling of HTTP requests which were +// wrapped into gRPC requests and routed through the httpgrpc.HTTP/Handle gRPC. +// +// HTTPGRPCTracer.Wrap must be attached to the same mux.Router assigned to dskit/server.Config.Router +// but it does not need to be attached to dskit/server.Config.HTTPMiddleware. +// dskit/server.Config.HTTPMiddleware is applied to direct HTTP requests not routed through gRPC; +// the server utilizes the default http middleware Tracer.Wrap for those standard http requests. 
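A minimal sketch of the attachment pattern described in the InitHTTPGRPCMiddleware documentation above (the function itself follows immediately below); the router setup is an assumption:

package main

import (
	"github.com/gorilla/mux"

	"github.com/grafana/dskit/middleware"
)

func main() {
	router := mux.NewRouter()
	// Attach the httpgrpc tracing middleware to the same router that serves
	// requests tunnelled over the httpgrpc.HTTP/Handle gRPC method.
	router = middleware.InitHTTPGRPCMiddleware(router)
	_ = router
}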
+func InitHTTPGRPCMiddleware(router *mux.Router) *mux.Router { + middleware := HTTPGRPCTracer{RouteMatcher: router} + router.Use(middleware.Wrap) + return router +} + +// Wrap creates and decorates server-side tracing spans for httpgrpc requests +// +// The httpgrpc client wraps HTTP requests up into a generic httpgrpc.HTTP/Handle gRPC method. +// The httpgrpc server unwraps httpgrpc.HTTP/Handle gRPC requests into HTTP requests +// and forwards them to its own internal HTTP router. +// +// By default, the server-side tracing spans for the httpgrpc.HTTP/Handle gRPC method +// have no data about the wrapped HTTP request being handled. +// +// HTTPGRPCTracer.Wrap starts a child span with span name and tags following the approach in +// Tracer.Wrap's usage of opentracing-contrib/go-stdlib/nethttp.Middleware +// and attaches the HTTP server span tags to the parent httpgrpc.HTTP/Handle gRPC span, allowing +// tracing tooling to differentiate the HTTP requests represented by the httpgrpc.HTTP/Handle spans. +// +// opentracing-contrib/go-stdlib/nethttp.Middleware could not be used here +// as it does not expose options to access and tag the incoming parent span. +func (hgt HTTPGRPCTracer) Wrap(next http.Handler) http.Handler { + httpOperationNameFunc := makeHTTPOperationNameFunc(hgt.RouteMatcher) + fn := func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + tracer := opentracing.GlobalTracer() + + parentSpan := opentracing.SpanFromContext(ctx) + + // extract relevant span & tag data from request + method := r.Method + matchedRoute := getRouteName(hgt.RouteMatcher, r) + urlPath := r.URL.Path + userAgent := r.Header.Get("User-Agent") + + // tag parent httpgrpc.HTTP/Handle server span, if it exists + if parentSpan != nil { + parentSpan.SetTag(string(ext.HTTPUrl), urlPath) + parentSpan.SetTag(string(ext.HTTPMethod), method) + parentSpan.SetTag("http.route", matchedRoute) + parentSpan.SetTag("http.user_agent", userAgent) + } + + // create and start child HTTP span + // mirroring opentracing-contrib/go-stdlib/nethttp.Middleware span name and tags + childSpanName := httpOperationNameFunc(r) + startSpanOpts := []opentracing.StartSpanOption{ + ext.SpanKindRPCServer, + opentracing.Tag{Key: string(ext.Component), Value: "net/http"}, + opentracing.Tag{Key: string(ext.HTTPUrl), Value: urlPath}, + opentracing.Tag{Key: string(ext.HTTPMethod), Value: method}, + opentracing.Tag{Key: "http.route", Value: matchedRoute}, + opentracing.Tag{Key: "http.user_agent", Value: userAgent}, + } + if parentSpan != nil { + startSpanOpts = append( + startSpanOpts, + opentracing.SpanReference{ + Type: opentracing.ChildOfRef, + ReferencedContext: parentSpan.Context(), + }) + } + + childSpan := tracer.StartSpan(childSpanName, startSpanOpts...) 
+ defer childSpan.Finish() + + r = r.WithContext(opentracing.ContextWithSpan(r.Context(), childSpan)) + next.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} + +func makeHTTPOperationNameFunc(routeMatcher RouteMatcher) func(r *http.Request) string { + return func(r *http.Request) string { + op := getRouteName(routeMatcher, r) + if op == "" { + return "HTTP " + r.Method + } + return fmt.Sprintf("HTTP %s - %s", r.Method, op) + } +} diff --git a/vendor/github.com/weaveworks/common/middleware/instrument.go b/vendor/github.com/grafana/dskit/middleware/instrument.go similarity index 87% rename from vendor/github.com/weaveworks/common/middleware/instrument.go rename to vendor/github.com/grafana/dskit/middleware/instrument.go index ac1f76f0..e5ae9c53 100644 --- a/vendor/github.com/weaveworks/common/middleware/instrument.go +++ b/vendor/github.com/grafana/dskit/middleware/instrument.go @@ -1,3 +1,7 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/instrument.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + package middleware import ( @@ -11,7 +15,7 @@ import ( "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus" - "github.com/weaveworks/common/instrument" + "github.com/grafana/dskit/instrument" ) const mb = 1024 * 1024 @@ -78,11 +82,12 @@ func (i Instrument) Wrap(next http.Handler) http.Handler { } // Return a name identifier for ths request. There are three options: -// 1. The request matches a gorilla mux route, with a name. Use that. -// 2. The request matches an unamed gorilla mux router. Munge the path -// template such that templates like '/api/{org}/foo' come out as -// 'api_org_foo'. -// 3. The request doesn't match a mux route. Return "other" +// 1. The request matches a gorilla mux route, with a name. Use that. +// 2. The request matches an unamed gorilla mux router. Munge the path +// template such that templates like '/api/{org}/foo' come out as +// 'api_org_foo'. +// 3. The request doesn't match a mux route. Return "other" +// // We do all this as we do not wish to emit high cardinality labels to // prometheus. func (i Instrument) getRouteName(r *http.Request) string { diff --git a/vendor/github.com/grafana/dskit/middleware/logging.go b/vendor/github.com/grafana/dskit/middleware/logging.go new file mode 100644 index 00000000..aeb15cc6 --- /dev/null +++ b/vendor/github.com/grafana/dskit/middleware/logging.go @@ -0,0 +1,146 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/logging.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. 
+ +package middleware + +import ( + "bytes" + "context" + "errors" + "net/http" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + + dskit_log "github.com/grafana/dskit/log" + "github.com/grafana/dskit/tracing" + "github.com/grafana/dskit/user" +) + +// Log middleware logs http requests +type Log struct { + Log log.Logger + DisableRequestSuccessLog bool + LogRequestHeaders bool // LogRequestHeaders true -> dump http headers at debug log level + LogRequestAtInfoLevel bool // LogRequestAtInfoLevel true -> log requests at info log level + SourceIPs *SourceIPExtractor + HTTPHeadersToExclude map[string]bool +} + +var defaultExcludedHeaders = map[string]bool{ + "Cookie": true, + "X-Csrf-Token": true, + "Authorization": true, +} + +func NewLogMiddleware(log log.Logger, logRequestHeaders bool, logRequestAtInfoLevel bool, sourceIPs *SourceIPExtractor, headersList []string) Log { + httpHeadersToExclude := map[string]bool{} + for header := range defaultExcludedHeaders { + httpHeadersToExclude[header] = true + } + for _, header := range headersList { + httpHeadersToExclude[header] = true + } + + return Log{ + Log: log, + LogRequestHeaders: logRequestHeaders, + LogRequestAtInfoLevel: logRequestAtInfoLevel, + SourceIPs: sourceIPs, + HTTPHeadersToExclude: httpHeadersToExclude, + } +} + +// logWithRequest information from the request and context as fields. +func (l Log) logWithRequest(r *http.Request) log.Logger { + localLog := l.Log + traceID, ok := tracing.ExtractTraceID(r.Context()) + if ok { + localLog = log.With(localLog, "traceID", traceID) + } + + if l.SourceIPs != nil { + ips := l.SourceIPs.Get(r) + if ips != "" { + localLog = log.With(localLog, "sourceIPs", ips) + } + } + + return user.LogWith(r.Context(), localLog) +} + +// Wrap implements Middleware +func (l Log) Wrap(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + begin := time.Now() + uri := r.RequestURI // capture the URI before running next, as it may get rewritten + requestLog := l.logWithRequest(r) + // Log headers before running 'next' in case other interceptors change the data. + headers, err := dumpRequest(r, l.HTTPHeadersToExclude) + if err != nil { + headers = nil + level.Error(requestLog).Log("msg", "could not dump request headers", "err", err) + } + var buf bytes.Buffer + wrapped := newBadResponseLoggingWriter(w, &buf) + next.ServeHTTP(wrapped, r) + + statusCode, writeErr := wrapped.getStatusCode(), wrapped.getWriteError() + + if writeErr != nil { + if errors.Is(writeErr, context.Canceled) { + if l.LogRequestAtInfoLevel { + level.Info(requestLog).Log("msg", dskit_log.LazySprintf("%s %s %s, request cancelled: %s ws: %v; %s", r.Method, uri, time.Since(begin), writeErr, IsWSHandshakeRequest(r), headers)) + } else { + level.Debug(requestLog).Log("msg", dskit_log.LazySprintf("%s %s %s, request cancelled: %s ws: %v; %s", r.Method, uri, time.Since(begin), writeErr, IsWSHandshakeRequest(r), headers)) + } + } else { + level.Warn(requestLog).Log("msg", dskit_log.LazySprintf("%s %s %s, error: %s ws: %v; %s", r.Method, uri, time.Since(begin), writeErr, IsWSHandshakeRequest(r), headers)) + } + + return + } + + switch { + // success and shouldn't log successful requests. 
+ case statusCode >= 200 && statusCode < 300 && l.DisableRequestSuccessLog: + return + + case 100 <= statusCode && statusCode < 500 || statusCode == http.StatusBadGateway || statusCode == http.StatusServiceUnavailable: + if l.LogRequestAtInfoLevel { + if l.LogRequestHeaders && headers != nil { + level.Info(requestLog).Log("msg", dskit_log.LazySprintf("%s %s (%d) %s ws: %v; %s", r.Method, uri, statusCode, time.Since(begin), IsWSHandshakeRequest(r), string(headers))) + } else { + level.Info(requestLog).Log("msg", dskit_log.LazySprintf("%s %s (%d) %s", r.Method, uri, statusCode, time.Since(begin))) + } + } else { + if l.LogRequestHeaders && headers != nil { + level.Debug(requestLog).Log("msg", dskit_log.LazySprintf("%s %s (%d) %s ws: %v; %s", r.Method, uri, statusCode, time.Since(begin), IsWSHandshakeRequest(r), string(headers))) + } else { + level.Debug(requestLog).Log("msg", dskit_log.LazySprintf("%s %s (%d) %s", r.Method, uri, statusCode, time.Since(begin))) + } + } + default: + level.Warn(requestLog).Log("msg", dskit_log.LazySprintf("%s %s (%d) %s Response: %q ws: %v; %s", r.Method, uri, statusCode, time.Since(begin), buf.Bytes(), IsWSHandshakeRequest(r), headers)) + } + }) +} + +func dumpRequest(req *http.Request, httpHeadersToExclude map[string]bool) ([]byte, error) { + var b bytes.Buffer + + // In case users initialize the Log middleware using the exported struct, skip the default headers anyway + if len(httpHeadersToExclude) == 0 { + httpHeadersToExclude = defaultExcludedHeaders + } + // Exclude some headers for security, or just that we don't need them when debugging + err := req.Header.WriteSubset(&b, httpHeadersToExclude) + if err != nil { + return nil, err + } + + ret := bytes.Replace(b.Bytes(), []byte("\r\n"), []byte("; "), -1) + return ret, nil +} diff --git a/vendor/github.com/weaveworks/common/middleware/middleware.go b/vendor/github.com/grafana/dskit/middleware/middleware.go similarity index 81% rename from vendor/github.com/weaveworks/common/middleware/middleware.go rename to vendor/github.com/grafana/dskit/middleware/middleware.go index ad8925ac..79720b33 100644 --- a/vendor/github.com/weaveworks/common/middleware/middleware.go +++ b/vendor/github.com/grafana/dskit/middleware/middleware.go @@ -1,3 +1,7 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/middleware.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + package middleware import ( diff --git a/vendor/github.com/weaveworks/common/middleware/path_rewrite.go b/vendor/github.com/grafana/dskit/middleware/path_rewrite.go similarity index 74% rename from vendor/github.com/weaveworks/common/middleware/path_rewrite.go rename to vendor/github.com/grafana/dskit/middleware/path_rewrite.go index 8250201a..2fc711bb 100644 --- a/vendor/github.com/weaveworks/common/middleware/path_rewrite.go +++ b/vendor/github.com/grafana/dskit/middleware/path_rewrite.go @@ -1,3 +1,7 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/path_rewrite.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. 
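An assumed usage sketch of the HTTP logging middleware above; the listen address and the extra excluded header are illustrative, not part of the diff:

package main

import (
	"net/http"
	"os"

	gokitlog "github.com/go-kit/log"

	"github.com/grafana/dskit/middleware"
)

func main() {
	logger := gokitlog.NewLogfmtLogger(os.Stderr)
	// Log request headers, log at info level, no source-IP extraction,
	// and exclude X-Api-Key on top of the default excluded headers.
	logMW := middleware.NewLogMiddleware(logger, true, true, nil, []string{"X-Api-Key"})

	handler := logMW.Wrap(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	_ = http.ListenAndServe(":8080", handler) // assumed port
}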
+ package middleware import ( @@ -5,7 +9,9 @@ import ( "net/url" "regexp" - log "github.com/sirupsen/logrus" + "github.com/go-kit/log/level" + + "github.com/grafana/dskit/log" ) // PathRewrite supports regex matching and replace on Request URIs @@ -27,7 +33,7 @@ func (p pathRewrite) Wrap(next http.Handler) http.Handler { r.URL.RawPath = p.regexp.ReplaceAllString(r.URL.EscapedPath(), p.replacement) path, err := url.PathUnescape(r.URL.RawPath) if err != nil { - log.Errorf("Got invalid url-encoded path %v after applying path rewrite %v: %v", r.URL.RawPath, p, err) + level.Error(log.Global()).Log("msg", log.LazySprintf("got invalid url-encoded path %v after applying path rewrite %v", r.URL.RawPath, p), "err", err) w.WriteHeader(http.StatusInternalServerError) return } diff --git a/vendor/github.com/weaveworks/common/middleware/response.go b/vendor/github.com/grafana/dskit/middleware/response.go similarity index 85% rename from vendor/github.com/weaveworks/common/middleware/response.go rename to vendor/github.com/grafana/dskit/middleware/response.go index 61fc4a72..e2ce1d0a 100644 --- a/vendor/github.com/weaveworks/common/middleware/response.go +++ b/vendor/github.com/grafana/dskit/middleware/response.go @@ -1,3 +1,7 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/response.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + package middleware import ( @@ -52,6 +56,11 @@ func newBadResponseLoggingWriter(rw http.ResponseWriter, buffer io.Writer) badRe return &b } +// Unwrap method is used by http.ResponseController to get access to original http.ResponseWriter. +func (b *nonFlushingBadResponseLoggingWriter) Unwrap() http.ResponseWriter { + return b.rw +} + // Header returns the header map that will be sent by WriteHeader. // Implements ResponseWriter. func (b *nonFlushingBadResponseLoggingWriter) Header() http.Header { @@ -103,12 +112,12 @@ func (b *nonFlushingBadResponseLoggingWriter) getWriteError() error { func (b *nonFlushingBadResponseLoggingWriter) captureResponseBody(data []byte) { if len(data) > b.bodyBytesLeft { - b.buffer.Write(data[:b.bodyBytesLeft]) - io.WriteString(b.buffer, "...") + _, _ = b.buffer.Write(data[:b.bodyBytesLeft]) + _, _ = io.WriteString(b.buffer, "...") b.bodyBytesLeft = 0 b.logBody = false } else { - b.buffer.Write(data) + _, _ = b.buffer.Write(data) b.bodyBytesLeft -= len(data) } } diff --git a/vendor/github.com/weaveworks/common/middleware/source_ips.go b/vendor/github.com/grafana/dskit/middleware/source_ips.go similarity index 95% rename from vendor/github.com/weaveworks/common/middleware/source_ips.go rename to vendor/github.com/grafana/dskit/middleware/source_ips.go index 17178d42..7c035ddb 100644 --- a/vendor/github.com/weaveworks/common/middleware/source_ips.go +++ b/vendor/github.com/grafana/dskit/middleware/source_ips.go @@ -1,3 +1,7 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/middleware/source_ips.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. 
+ package middleware import ( diff --git a/vendor/github.com/grafana/dskit/middleware/zero_response.go b/vendor/github.com/grafana/dskit/middleware/zero_response.go new file mode 100644 index 00000000..1bb4ecc8 --- /dev/null +++ b/vendor/github.com/grafana/dskit/middleware/zero_response.go @@ -0,0 +1,132 @@ +package middleware + +import ( + "errors" + "net" + "os" + "regexp" + "strconv" + "sync" + + "github.com/go-kit/log" + "go.uber.org/atomic" +) + +// NewZeroResponseListener returns a Listener that logs all connections that encountered io timeout on reads, and were closed before sending any response. +func NewZeroResponseListener(list net.Listener, log log.Logger) net.Listener { + return &zeroResponseListener{ + Listener: list, + log: log, + bufPool: sync.Pool{ + New: func() interface{} { return &bufHolder{buf: make([]byte, 0, requestBufSize)} }, + }, + } +} + +// Wrap a slice in a struct, so we can store a pointer in sync.Pool +type bufHolder struct { + buf []byte +} + +// Size of buffer for read data. We log this eventually. +const requestBufSize = 512 + +type zeroResponseListener struct { + net.Listener + log log.Logger + bufPool sync.Pool // pool of &bufHolder. +} + +func (zl *zeroResponseListener) Accept() (net.Conn, error) { + conn, err := zl.Listener.Accept() + if err != nil { + return nil, err + } + bh := zl.bufPool.Get().(*bufHolder) + bh.buf = bh.buf[:0] + return &zeroResponseConn{Conn: conn, log: zl.log, bufHolder: bh, returnPool: &zl.bufPool}, nil +} + +type zeroResponseConn struct { + net.Conn + + log log.Logger + once sync.Once + returnPool *sync.Pool + + bufHolderMux sync.Mutex + bufHolder *bufHolder // Buffer with first requestBufSize bytes from connection. Set to nil as soon as data is written to the connection. + + lastReadErrIsDeadlineExceeded atomic.Bool +} + +func (zc *zeroResponseConn) Read(b []byte) (n int, err error) { + n, err = zc.Conn.Read(b) + if err != nil && errors.Is(err, os.ErrDeadlineExceeded) { + zc.lastReadErrIsDeadlineExceeded.Store(true) + } else { + zc.lastReadErrIsDeadlineExceeded.Store(false) + } + + // Store first requestBufSize read bytes on connection into the buffer for logging. + if n > 0 { + zc.bufHolderMux.Lock() + defer zc.bufHolderMux.Unlock() + + if zc.bufHolder != nil { + rem := requestBufSize - len(zc.bufHolder.buf) // how much space is in our buffer. + if rem > n { + rem = n + } + if rem > 0 { + zc.bufHolder.buf = append(zc.bufHolder.buf, b[:rem]...) + } + } + } + return +} + +func (zc *zeroResponseConn) Write(b []byte) (n int, err error) { + n, err = zc.Conn.Write(b) + if n > 0 { + zc.bufHolderMux.Lock() + if zc.bufHolder != nil { + zc.returnPool.Put(zc.bufHolder) + zc.bufHolder = nil + } + zc.bufHolderMux.Unlock() + } + return +} + +var authRegexp = regexp.MustCompile(`((?i)\r\nauthorization:\s+)(\S+\s+)(\S+)`) + +func (zc *zeroResponseConn) Close() error { + err := zc.Conn.Close() + + zc.once.Do(func() { + zc.bufHolderMux.Lock() + defer zc.bufHolderMux.Unlock() + + // If buffer was already returned, it means there was some data written on the connection, nothing to do. + if zc.bufHolder == nil { + return + } + + // If we didn't write anything to this connection, and we've got timeout while reading data, it looks like + // slow a slow client failing to send a request to us. + if !zc.lastReadErrIsDeadlineExceeded.Load() { + return + } + + b := zc.bufHolder.buf + b = authRegexp.ReplaceAll(b, []byte("${1}${2}***")) // Replace value in Authorization header with ***. 
+ + _ = zc.log.Log("msg", "read timeout, connection closed with no response", "read", strconv.Quote(string(b)), "remote", zc.RemoteAddr().String()) + + zc.returnPool.Put(zc.bufHolder) + zc.bufHolder = nil + }) + + return err +} diff --git a/vendor/github.com/grafana/dskit/multierror/multierror.go b/vendor/github.com/grafana/dskit/multierror/multierror.go new file mode 100644 index 00000000..68b73e20 --- /dev/null +++ b/vendor/github.com/grafana/dskit/multierror/multierror.go @@ -0,0 +1,75 @@ +// Provenance-includes-location: https://github.com/thanos-io/thanos/blob/2027fb30/pkg/errutil/multierror.go +// Provenance-includes-copyright: The Thanos Authors. + +package multierror + +import ( + "bytes" + "errors" + "fmt" +) + +// MultiError implements the error interface, and contains the errors used to construct it. +type MultiError []error + +// Add adds the error to the error list if it is not nil. +func (es *MultiError) Add(err error) { + if err == nil { + return + } + if merr, ok := err.(nonNilMultiError); ok { + *es = append(*es, merr...) + } else { + *es = append(*es, err) + } +} + +// Err returns the error list as an error or nil if it is empty. +func (es MultiError) Err() error { + if len(es) == 0 { + return nil + } + return nonNilMultiError(es) +} + +// New returns a new MultiError containing supplied errors. +func New(errs ...error) MultiError { + merr := MultiError{} + for _, err := range errs { + merr.Add(err) + } + + return merr +} + +type nonNilMultiError MultiError + +// Error returns a concatenated string of the contained errors. +func (es nonNilMultiError) Error() string { + var buf bytes.Buffer + + if len(es) > 1 { + fmt.Fprintf(&buf, "%d errors: ", len(es)) + } + + for i, err := range es { + if i != 0 { + buf.WriteString("; ") + } + buf.WriteString(err.Error()) + } + + return buf.String() +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored in the MultiError. +func (es nonNilMultiError) Is(target error) bool { + for _, err := range es { + if errors.Is(err, target) { + return true + } + } + return false +} diff --git a/vendor/github.com/grafana/dskit/netutil/netutil.go b/vendor/github.com/grafana/dskit/netutil/netutil.go index 3803c0df..2711a0f7 100644 --- a/vendor/github.com/grafana/dskit/netutil/netutil.go +++ b/vendor/github.com/grafana/dskit/netutil/netutil.go @@ -1,10 +1,13 @@ package netutil import ( + "fmt" "net" + "net/netip" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/pkg/errors" ) var ( @@ -54,3 +57,164 @@ func privateNetworkInterfaces(all []net.Interface, fallback []string, logger log } return privInts } + +// GetFirstAddressOf returns the first IPv4/IPV6 address of the supplied interface names, omitting any link-local addresses. +func GetFirstAddressOf(names []string, logger log.Logger, enableInet6 bool) (string, error) { + return getFirstAddressOf(names, logger, getInterfaceAddresses, enableInet6) +} + +// NetworkInterfaceAddressGetter matches the signature of net.InterfaceByName() to allow for test mocks. +type NetworkInterfaceAddressGetter func(name string) ([]netip.Addr, error) + +// getFirstAddressOf returns the first IPv4/IPV6 address of the supplied interface names, omitting any link-local addresses. 
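A small sketch exercising the MultiError helper added above; all names come from the multierror hunk:

package main

import (
	"errors"
	"fmt"

	"github.com/grafana/dskit/multierror"
)

var errNotFound = errors.New("not found")

func main() {
	merr := multierror.New()
	merr.Add(nil) // nil errors are ignored
	merr.Add(errNotFound)
	merr.Add(errors.New("timeout"))

	err := merr.Err()
	fmt.Println(err)                         // "2 errors: not found; timeout"
	fmt.Println(errors.Is(err, errNotFound)) // true: Is traverses the list
}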
+func getFirstAddressOf(names []string, logger log.Logger, interfaceAddrsFunc NetworkInterfaceAddressGetter, enableInet6 bool) (string, error) { + var ipAddr netip.Addr + + // When passing an empty list of interface names, we select all interfaces. + if len(names) == 0 { + infs, err := net.Interfaces() + if err != nil { + return "", fmt.Errorf("failed to get interface list and no interface names supplied: %w", err) + } + names = make([]string, len(infs)) + for i, v := range infs { + names[i] = v.Name + } + } + + level.Debug(logger).Log("msg", "looking for addresses", "inf", fmt.Sprintf("%s", names), "inet6enabled", enableInet6) + + for _, name := range names { + addrs, err := interfaceAddrsFunc(name) + if err != nil { + level.Warn(logger).Log("msg", "error getting addresses for interface", "inf", name, "err", err) + continue + } + + if len(addrs) <= 0 { + level.Warn(logger).Log("msg", "no addresses found for interface", "inf", name, "err", err) + continue + } + if ip := filterBestIP(addrs, enableInet6); ip.IsValid() { + // Select the best between what we've received + ipAddr = filterBestIP([]netip.Addr{ip, ipAddr}, enableInet6) + } + level.Debug(logger).Log("msg", "detected highest quality address", "ipAddr", ipAddr.String(), "inf", name) + if ipAddr.IsLinkLocalUnicast() || !ipAddr.IsValid() { + level.Debug(logger).Log("msg", "ignoring address", "ipAddr", ipAddr.String(), "inf", name) + continue + } + + if enableInet6 { + if ipAddr.Is6() { + return ipAddr.String(), nil + } + continue + } + + return ipAddr.String(), nil + } + + level.Debug(logger).Log("msg", "detected IP address after looking up all configured interface names", "ipAddr", ipAddr.String()) + if !ipAddr.IsValid() { + return "", fmt.Errorf("no useable address found for interfaces %s", names) + } + if ipAddr.IsLinkLocalUnicast() { + level.Warn(logger).Log("msg", "using link-local address", "address", ipAddr.String()) + } + return ipAddr.String(), nil +} + +// getInterfaceAddresses is the standard approach to collecting []net.Addr from a network interface by name. +func getInterfaceAddresses(name string) ([]netip.Addr, error) { + inf, err := net.InterfaceByName(name) + if err != nil { + return nil, err + } + + addrs, err := inf.Addrs() + if err != nil { + return nil, err + } + + // Using netip.Addr to allow for easier and consistent address parsing. + // Without this, the net.ParseCIDR() that we might like to use in a test does + // not have the same net.Addr implementation that we get from calling + // interface.Addrs() as above. Here we normalize on netip.Addr. + netaddrs := make([]netip.Addr, len(addrs)) + for i, a := range addrs { + prefix, err := netip.ParsePrefix(a.String()) + if err != nil { + return nil, errors.Wrap(err, "failed to parse netip.Prefix") + } + netaddrs[i] = prefix.Addr() + } + + return netaddrs, nil +} + +// filterBestIP returns an opinionated "best" address from a list of addresses. +// A high quality address is one that is considered routable, and not in the link-local address space. +// A low quality address is a link-local address. +// When an IPv6 is enabled using enableInet6, an IPv6 will be preferred over an equivalent quality IPv4 address, +// otherwise IPv6 addresses are guaranteed to not be returned from this function. +// Loopback addresses are never selected. 
+func filterBestIP(addrs []netip.Addr, enableInet6 bool) netip.Addr { + var invalid, inet4Addr, inet6Addr netip.Addr + + for _, addr := range addrs { + if addr.IsLoopback() || !addr.IsValid() { + continue + } + + if addr.Is6() && !enableInet6 { + continue + } + + if addr.Is4() { + // If we have already been set, can we improve on the quality? + if inet4Addr.IsValid() { + if inet4Addr.IsLinkLocalUnicast() && !addr.IsLinkLocalUnicast() { + inet4Addr = addr + } + continue + } + inet4Addr = addr + } + + if addr.Is6() { + // If we have already been set, can we improve on the quality? + if inet6Addr.IsValid() { + if inet6Addr.IsLinkLocalUnicast() && !addr.IsLinkLocalUnicast() { + inet6Addr = addr + } + continue + } + inet6Addr = addr + } + } + + // If both address families have been set, compare. + if inet4Addr.IsValid() && inet6Addr.IsValid() { + if inet4Addr.IsLinkLocalUnicast() && !inet6Addr.IsLinkLocalUnicast() { + return inet6Addr + } + if inet6Addr.IsLinkLocalUnicast() && !inet4Addr.IsLinkLocalUnicast() { + return inet4Addr + } + if enableInet6 { + return inet6Addr + } + return inet4Addr + } + + if inet4Addr.IsValid() { + return inet4Addr + } + + if inet6Addr.IsValid() { + return inet6Addr + } + + return invalid +} diff --git a/vendor/github.com/grafana/dskit/ring/basic_lifecycler.go b/vendor/github.com/grafana/dskit/ring/basic_lifecycler.go index 780926c0..0b72ef17 100644 --- a/vendor/github.com/grafana/dskit/ring/basic_lifecycler.go +++ b/vendor/github.com/grafana/dskit/ring/basic_lifecycler.go @@ -12,6 +12,7 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/atomic" "github.com/grafana/dskit/kv" "github.com/grafana/dskit/services" @@ -56,14 +57,23 @@ type BasicLifecyclerConfig struct { // If true lifecycler doesn't unregister instance from the ring when it's stopping. Default value is false, // which means unregistering. KeepInstanceInTheRingOnShutdown bool + + // If set, specifies the TokenGenerator implementation that will be used for generating tokens. + // Default value is nil, which means that RandomTokenGenerator is used. + RingTokenGenerator TokenGenerator } -// BasicLifecycler is a basic ring lifecycler which allows to hook custom -// logic at different stages of the lifecycle. This lifecycler should be -// used to build higher level lifecyclers. -// -// This lifecycler never change the instance state. It's the delegate -// responsibility to ChangeState(). +/* +BasicLifecycler is a Service that is responsible for publishing changes to a ring for a single instance. +It accepts a delegate that can handle lifecycle events, and should be used to build higher level lifecyclers. +Unlike [Lifecycler], BasicLifecycler does not change instance state internally. +Rather, it's the delegate's responsibility to call [BasicLifecycler.ChangeState]. + + - When a BasicLifecycler first starts, it will call [ring.BasicLifecyclerDelegate.OnRingInstanceRegister] for the delegate, and will add the instance to the ring. + - The lifecycler will then periodically, based on the [ring.BasicLifecyclerConfig.TokensObservePeriod], attempt to verify that its tokens have been added to the ring, after which it will call [ring.BasicLifecyclerDelegate.OnRingInstanceTokens]. + - The lifecycler will update the key/value store with heartbeats and state changes based on the [ring.BasicLifecyclerConfig.HeartbeatPeriod], calling [ring.BasicLifecyclerDelegate.OnRingInstanceHeartbeat] each time.
+ - When the BasicLifecycler is stopped, it will call [ring.BasicLifecyclerDelegate.OnRingInstanceStopping]. +*/ type BasicLifecycler struct { *services.BasicService @@ -83,19 +93,31 @@ type BasicLifecycler struct { // The current instance state. currState sync.RWMutex currInstanceDesc *InstanceDesc + + // Whether to keep the instance in the ring or to unregister it on shutdown + keepInstanceInTheRingOnShutdown *atomic.Bool + + tokenGenerator TokenGenerator } // NewBasicLifecycler makes a new BasicLifecycler. func NewBasicLifecycler(cfg BasicLifecyclerConfig, ringName, ringKey string, store kv.Client, delegate BasicLifecyclerDelegate, logger log.Logger, reg prometheus.Registerer) (*BasicLifecycler, error) { + tokenGenerator := cfg.RingTokenGenerator + if tokenGenerator == nil { + tokenGenerator = NewRandomTokenGenerator() + } + l := &BasicLifecycler{ - cfg: cfg, - ringName: ringName, - ringKey: ringKey, - logger: logger, - store: store, - delegate: delegate, - metrics: NewBasicLifecyclerMetrics(ringName, reg), - actorChan: make(chan func()), + cfg: cfg, + ringName: ringName, + ringKey: ringKey, + logger: logger, + store: store, + delegate: delegate, + metrics: NewBasicLifecyclerMetrics(ringName, reg), + actorChan: make(chan func()), + keepInstanceInTheRingOnShutdown: atomic.NewBool(cfg.KeepInstanceInTheRingOnShutdown), + tokenGenerator: tokenGenerator, } l.metrics.tokensToOwn.Set(float64(cfg.NumTokens)) @@ -138,6 +160,10 @@ func (l *BasicLifecycler) GetTokens() Tokens { return l.currInstanceDesc.GetTokens() } +func (l *BasicLifecycler) GetTokenGenerator() TokenGenerator { + return l.tokenGenerator +} + // GetRegisteredAt returns the timestamp when the instance has been registered to the ring // or a zero value if the lifecycler hasn't been started yet or was already registered and its // timestamp is unknown. @@ -162,6 +188,16 @@ func (l *BasicLifecycler) ChangeState(ctx context.Context, state InstanceState) }) } +// ShouldKeepInstanceInTheRingOnShutdown returns whether the instance should be kept in the ring or unregistered on shutdown. +func (l *BasicLifecycler) ShouldKeepInstanceInTheRingOnShutdown() bool { + return l.keepInstanceInTheRingOnShutdown.Load() +} + +// SetKeepInstanceInTheRingOnShutdown enables/disables unregistering on shutdown. +func (l *BasicLifecycler) SetKeepInstanceInTheRingOnShutdown(enabled bool) { + l.keepInstanceInTheRingOnShutdown.Store(enabled) +} + func (l *BasicLifecycler) starting(ctx context.Context) error { if err := l.registerInstance(ctx); err != nil { return errors.Wrap(err, "register instance in the ring") @@ -232,7 +268,7 @@ heartbeatLoop: } } - if l.cfg.KeepInstanceInTheRingOnShutdown { + if l.ShouldKeepInstanceInTheRingOnShutdown() { level.Info(l.logger).Log("msg", "keeping instance in the ring", "ring", l.ringName) } else { // Remove the instance from the ring. @@ -277,11 +313,6 @@ func (l *BasicLifecycler) registerInstance(ctx context.Context) error { registeredAt = time.Now() } - if !exists { - instanceDesc = ringDesc.AddIngester(l.cfg.ID, l.cfg.Addr, l.cfg.Zone, tokens, state, registeredAt) - return ringDesc, true, nil - } - // Always overwrite the instance in the ring (even if already exists) because some properties // may have changed (state, tokens, zone, address) and even if they didn't the heartbeat at // least did.
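The stopping logic above now consults ShouldKeepInstanceInTheRingOnShutdown() instead of reading the config directly, so the behaviour can be flipped at runtime. A minimal sketch of wiring the new config surface together, assuming an existing kv.Client, delegate and logger; all identifiers below are illustrative, not part of this diff:

	import (
		"github.com/go-kit/log"
		"github.com/grafana/dskit/kv"
		"github.com/grafana/dskit/ring"
		"github.com/prometheus/client_golang/prometheus"
	)

	func newExampleLifecycler(store kv.Client, delegate ring.BasicLifecyclerDelegate, logger log.Logger) (*ring.BasicLifecycler, error) {
		cfg := ring.BasicLifecyclerConfig{
			ID:        "instance-1",
			Addr:      "10.0.0.1:9095",
			NumTokens: 128,
			// Leaving RingTokenGenerator nil selects NewRandomTokenGenerator().
			RingTokenGenerator: nil,
			// Unregister on shutdown by default; toggled below (e.g. for a rolling restart).
			KeepInstanceInTheRingOnShutdown: false,
		}
		lc, err := ring.NewBasicLifecycler(cfg, "example-ring", "collectors/example-ring", store, delegate, logger, prometheus.NewRegistry())
		if err != nil {
			return nil, err
		}
		// Reads at stop time go through ShouldKeepInstanceInTheRingOnShutdown(),
		// so this can also be called while the lifecycler is running.
		lc.SetKeepInstanceInTheRingOnShutdown(true)
		return lc, nil
	}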
@@ -350,7 +381,7 @@ func (l *BasicLifecycler) verifyTokens(ctx context.Context) bool { needTokens := l.cfg.NumTokens - len(actualTokens) level.Info(l.logger).Log("msg", "generating new tokens", "count", needTokens, "ring", l.ringName) - newTokens := GenerateTokens(needTokens, takenTokens) + newTokens := l.tokenGenerator.GenerateTokens(needTokens, takenTokens) actualTokens = append(actualTokens, newTokens...) sort.Sort(actualTokens) diff --git a/vendor/github.com/grafana/dskit/ring/basic_lifecycler_delegates.go b/vendor/github.com/grafana/dskit/ring/basic_lifecycler_delegates.go index 177962f6..34cc0d92 100644 --- a/vendor/github.com/grafana/dskit/ring/basic_lifecycler_delegates.go +++ b/vendor/github.com/grafana/dskit/ring/basic_lifecycler_delegates.go @@ -83,6 +83,7 @@ func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLife // case the instance exist in the ring (which is OK because the lifecycler // will correctly reconcile this case too). return d.next.OnRingInstanceRegister(lifecycler, ringDesc, true, lifecycler.GetInstanceID(), InstanceDesc{ + Id: lifecycler.GetInstanceID(), Addr: lifecycler.GetInstanceAddr(), Timestamp: time.Now().Unix(), RegisteredTimestamp: lifecycler.GetRegisteredAt().Unix(), @@ -164,7 +165,7 @@ func NewInstanceRegisterDelegate(state InstanceState, tokenCount int) InstanceRe } } -func (d InstanceRegisterDelegate) OnRingInstanceRegister(_ *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (InstanceState, Tokens) { +func (d InstanceRegisterDelegate) OnRingInstanceRegister(l *BasicLifecycler, ringDesc Desc, instanceExists bool, _ string, instanceDesc InstanceDesc) (InstanceState, Tokens) { // Keep the existing tokens if any, otherwise start with a clean situation. var tokens []uint32 if instanceExists { @@ -172,7 +173,7 @@ func (d InstanceRegisterDelegate) OnRingInstanceRegister(_ *BasicLifecycler, rin } takenTokens := ringDesc.GetTokens() - newTokens := GenerateTokens(d.tokenCount-len(tokens), takenTokens) + newTokens := l.GetTokenGenerator().GenerateTokens(d.tokenCount-len(tokens), takenTokens) // Tokens sorting will be enforced by the parent caller. tokens = append(tokens, newTokens...) 
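Both verifyTokens and the register delegate now draw tokens from the lifecycler's TokenGenerator instead of the package-level GenerateTokens. A sketch of a custom generator, assuming the interface consists of the three methods this diff exercises (GenerateTokens, CanJoin, CanJoinEnabled); the even-spacing strategy is purely illustrative:

	import (
		"math"
		"sort"

		"github.com/grafana/dskit/ring"
	)

	type evenSpreadTokenGenerator struct{}

	// GenerateTokens hands out evenly spaced tokens, linearly probing past any
	// token that is already taken.
	func (evenSpreadTokenGenerator) GenerateTokens(requestedTokensCount int, allTakenTokens []uint32) ring.Tokens {
		if requestedTokensCount <= 0 {
			return ring.Tokens{}
		}
		taken := make(map[uint32]struct{}, len(allTakenTokens))
		for _, t := range allTakenTokens {
			taken[t] = struct{}{}
		}
		tokens := make(ring.Tokens, 0, requestedTokensCount)
		step := uint32(math.MaxUint32 / uint64(requestedTokensCount+1))
		for next := step; len(tokens) < requestedTokensCount; next += step {
			candidate := next
			for {
				if _, used := taken[candidate]; !used {
					break
				}
				candidate++
			}
			taken[candidate] = struct{}{}
			tokens = append(tokens, candidate)
		}
		sort.Sort(tokens)
		return tokens
	}

	// No join gating: CanJoin is never consulted when CanJoinEnabled reports false.
	func (evenSpreadTokenGenerator) CanJoinEnabled() bool                       { return false }
	func (evenSpreadTokenGenerator) CanJoin(map[string]ring.InstanceDesc) error { return nil }

Plugged in via BasicLifecyclerConfig.RingTokenGenerator (or LifecyclerConfig.RingTokenGenerator below), it replaces the random strategy everywhere the lifecycler needs fresh tokens.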
diff --git a/vendor/github.com/grafana/dskit/ring/batch.go b/vendor/github.com/grafana/dskit/ring/batch.go index fa627445..5acd8fd0 100644 --- a/vendor/github.com/grafana/dskit/ring/batch.go +++ b/vendor/github.com/grafana/dskit/ring/batch.go @@ -8,7 +8,8 @@ import ( "sync" "go.uber.org/atomic" - "google.golang.org/grpc/status" + + grpcUtils "github.com/grafana/dskit/grpcutil" ) type batchTracker struct { @@ -25,40 +26,79 @@ type instance struct { } type itemTracker struct { - minSuccess int - maxFailures int - succeeded atomic.Int32 - failed4xx atomic.Int32 - failed5xx atomic.Int32 - remaining atomic.Int32 - err atomic.Error + minSuccess int + maxFailures int + succeeded atomic.Int32 + failedClient atomic.Int32 + failedServer atomic.Int32 + remaining atomic.Int32 + err atomic.Error } -func (i *itemTracker) recordError(err error) int32 { +func (i *itemTracker) recordError(err error, isClientError func(error) bool) int32 { i.err.Store(err) - if s, ok := status.FromError(err); ok && s.Code()/100 == 4 { - return i.failed4xx.Inc() + if isClientError(err) { + return i.failedClient.Inc() } + return i.failedServer.Inc() +} - return i.failed5xx.Inc() +func isHTTPStatus4xx(err error) bool { + code := grpcUtils.ErrorToStatusCode(err) + return code/100 == 4 } -// DoBatch request against a set of keys in the ring, handling replication and -// failures. For example if we want to write N items where they may all -// hit different instances, and we want them all replicated R ways with -// quorum writes, we track the relationship between batch RPCs and the items -// within them. -// -// Callback is passed the instance to target, and the indexes of the keys -// to send to that instance. +// DoBatch is a deprecated version of DoBatchWithOptions where grpc errors containing status codes 4xx are treated as client errors. +// Deprecated. Use DoBatchWithOptions instead. +func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error { + return DoBatchWithOptions(ctx, op, r, keys, callback, DoBatchOptions{ + Cleanup: cleanup, + IsClientError: isHTTPStatus4xx, + }) +} + +// DoBatchOptions defines options for the DoBatchWithOptions call. +// Zero value options are valid, as well as individual zero valued fields. +type DoBatchOptions struct { + // Cleanup is always called, either on an error before starting the batches or after they are all finished. + // If nil, a noop will be called. + Cleanup func() + + // IsClientError classifies errors returned by `callback()` into client or server errors. + // See `batchTracker.record()` function for details about how errors are combined into final error returned by DoBatchWithClientError. + // If nil, a default implementation is used that classifies grpc errors containing status codes 4xx as client errors. + IsClientError func(error) bool + + // Go will be used to spawn the callback goroutines, and can be used to use a worker pool like concurrency.ReusableGoroutinesPool. + Go func(func()) +} + +func (o *DoBatchOptions) replaceZeroValuesWithDefaults() { + if o.Cleanup == nil { + o.Cleanup = func() {} + } + if o.IsClientError == nil { + o.IsClientError = isHTTPStatus4xx + } + if o.Go == nil { + o.Go = func(f func()) { go f() } + } +} + +// DoBatchWithOptions request against a set of keys in the ring, handling replication and failures. 
+// For example if we want to write N items where they may all hit different instances, +// and we want them all replicated R ways with quorum writes, +// we track the relationship between batch RPCs and the items within them. // -// cleanup() is always called, either on an error before starting the batches or after they all finish. +// See comments on DoBatchOptions for available options for this call. // -// Not implemented as a method on Ring so we can test separately. -func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error { +// Not implemented as a method on Ring, so we can test separately. +func DoBatchWithOptions(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, o DoBatchOptions) error { + o.replaceZeroValuesWithDefaults() + if r.InstancesCount() <= 0 { - cleanup() + o.Cleanup() return fmt.Errorf("DoBatch: InstancesCount <= 0") } expectedTrackers := len(keys) * (r.ReplicationFactor() + 1) / r.InstancesCount() @@ -73,7 +113,7 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb for i, key := range keys { replicationSet, err := r.Get(key, op, bufDescs[:0], bufHosts[:0], bufZones[:0]) if err != nil { - cleanup() + o.Cleanup() return err } itemTrackers[i].minSuccess = len(replicationSet.Instances) - replicationSet.MaxErrors @@ -104,19 +144,19 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb wg.Add(len(instances)) for _, i := range instances { - go func(i instance) { + i := i + o.Go(func() { err := callback(i.desc, i.indexes) - tracker.record(i.itemTrackers, err) + tracker.record(i.itemTrackers, err, o.IsClientError) wg.Done() - }(i) + }) } // Perform cleanup at the end. - go func() { + o.Go(func() { wg.Wait() - - cleanup() - }() + o.Cleanup() + }) select { case err := <-tracker.err: @@ -128,35 +168,36 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb } } -func (b *batchTracker) record(itemTrackers []*itemTracker, err error) { +func (b *batchTracker) record(itemTrackers []*itemTracker, err error, isClientError func(error) bool) { // If we reach the required number of successful puts on this item, then decrement the // number of pending items by one. // // The use of atomic increments here is needed as: // * rpcsPending and rpcsFailed guarantee only a single goroutine will write to either channel - // * succeeded, failed4xx, failed5xx and remaining guarantee that the "return decision" is made atomically + // * succeeded, failedClient, failedServer and remaining guarantee that the "return decision" is made atomically // avoiding race condition - for i := range itemTrackers { + for _, it := range itemTrackers { if err != nil { // Track the number of errors by error family, and if it exceeds maxFailures // shortcut the waiting rpc. - errCount := itemTrackers[i].recordError(err) + errCount := it.recordError(err, isClientError) // We should return an error if we reach the maxFailure (quorum) on a given error family OR - // we don't have any remaining instances to try. + // we don't have any remaining instances to try. In the following we use ClientError and ServerError + // to denote errors, for which isClientError() returns true and false respectively. 
// - // Ex: 2xx, 4xx, 5xx -> return 5xx - // Ex: 4xx, 4xx, _ -> return 4xx - // Ex: 5xx, _, 5xx -> return 5xx + // Ex: Success, ClientError, ServerError -> return ServerError + // Ex: ClientError, ClientError, Success -> return ClientError + // Ex: ServerError, Success, ServerError -> return ServerError // - // The reason for searching for quorum in 4xx and 5xx errors separately is to give a more accurate - // response to the initial request. So if a quorum of instances rejects the request with 4xx, then the request should be rejected - // even if less-than-quorum instances indicated a failure to process the request (via 5xx). + // The reason for searching for quorum in ClientError and ServerError errors separately is to give a more accurate + // response to the initial request. So if a quorum of instances rejects the request with ClientError, then the request should be rejected + // even if less-than-quorum instances indicated a failure to process the request (via ServerError). // The speculation is that had the unavailable instances been available, - // they would have rejected the request with a 4xx as well. - // Conversely, if a quorum of instances failed to process the request via 5xx and less-than-quorum - // instances rejected it with 4xx, then we do not have quorum to reject the request as a 4xx. Instead, - // we return the last 5xx error for debuggability. - if errCount > int32(itemTrackers[i].maxFailures) || itemTrackers[i].remaining.Dec() == 0 { + // they would have rejected the request with a ClientError as well. + // Conversely, if a quorum of instances failed to process the request via ServerError and less-than-quorum + // instances rejected it with ClientError, then we do not have quorum to reject the request as a ClientError. Instead, + // we return the last ServerError error for debuggability. + if errCount > int32(it.maxFailures) || it.remaining.Dec() == 0 { if b.rpcsFailed.Inc() == 1 { b.err <- err } @@ -164,7 +205,8 @@ func (b *batchTracker) record(itemTrackers []*itemTracker, err error) { } else { // If we successfully process items in minSuccess instances, // then wake up the waiting rpc, so it can return early. - if itemTrackers[i].succeeded.Inc() >= int32(itemTrackers[i].minSuccess) { + succeeded := it.succeeded.Inc() + if succeeded == int32(it.minSuccess) { if b.rpcsPending.Dec() == 0 { b.done <- struct{}{} } @@ -172,11 +214,12 @@ func (b *batchTracker) record(itemTrackers []*itemTracker, err error) { } // If we successfully called this particular instance, but we don't have any remaining instances to try, - // and we failed to call minSuccess instances, then we need to return the last error - // Ex: 4xx, 5xx, 2xx - if itemTrackers[i].remaining.Dec() == 0 { - if b.rpcsFailed.Inc() == 1 { - b.err <- itemTrackers[i].err.Load() + // and we failed to call minSuccess instances, then we need to return the last error. + if succeeded < int32(it.minSuccess) { + if it.remaining.Dec() == 0 { + if b.rpcsFailed.Inc() == 1 { + b.err <- it.err.Load() + } } } } diff --git a/vendor/github.com/grafana/dskit/ring/doc.go b/vendor/github.com/grafana/dskit/ring/doc.go new file mode 100644 index 00000000..541f00f7 --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/doc.go @@ -0,0 +1,20 @@ +/* +Package ring contains types and functions for creating and working with rings. + +# Overview + +Rings are shared between instances via a key-value store, and are represented by the [Desc] struct, which contains a map of [InstanceDesc] +structs representing individual instances in the ring. 
+ +# Creating a Ring + +Two types are available for creating and updating rings: + + - [Lifecycler] - A Service that writes to a ring on behalf of a single instance. It's responsible for claiming tokens on the ring and updating the key/value store with heartbeats, state changes, and token changes. This type is the original lifecycler implementation, and [BasicLifecycler] should be used instead for new services. + - [BasicLifecycler] - A Service that writes to a ring on behalf of a single instance. It's responsible for claiming tokens on the ring, updating the key/value store with heartbeats and state changes, and uses a delegate with event listeners to help with these. This type is general purpose, is used by numerous services, and is meant for building higher level lifecyclers. + +# Observing a ring + +The [Ring] type is a Service that is primarily used for reading and watching for changes to a ring. +*/ +package ring diff --git a/vendor/github.com/grafana/dskit/ring/flush.go b/vendor/github.com/grafana/dskit/ring/flush.go index d07908e8..798169b2 100644 --- a/vendor/github.com/grafana/dskit/ring/flush.go +++ b/vendor/github.com/grafana/dskit/ring/flush.go @@ -31,6 +31,6 @@ func NewNoopFlushTransferer() *NoopFlushTransferer { func (t *NoopFlushTransferer) Flush() {} // TransferOut is a noop -func (t *NoopFlushTransferer) TransferOut(ctx context.Context) error { +func (t *NoopFlushTransferer) TransferOut(_ context.Context) error { return nil } diff --git a/vendor/github.com/grafana/dskit/ring/http.go b/vendor/github.com/grafana/dskit/ring/http.go index 6521ca20..e70b3e6f 100644 --- a/vendor/github.com/grafana/dskit/ring/http.go +++ b/vendor/github.com/grafana/dskit/ring/http.go @@ -93,7 +93,7 @@ func (h *ringPageHandler) handle(w http.ResponseWriter, req *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - ownedTokens := ringDesc.countTokens() + ownedTokens := ringDesc.CountTokens() var ingesterIDs []string for id := range ringDesc.Ingesters { diff --git a/vendor/github.com/grafana/dskit/ring/lifecycler.go b/vendor/github.com/grafana/dskit/ring/lifecycler.go index dfef6afb..4f51b46a 100644 --- a/vendor/github.com/grafana/dskit/ring/lifecycler.go +++ b/vendor/github.com/grafana/dskit/ring/lifecycler.go @@ -4,9 +4,12 @@ import ( "context" "flag" "fmt" + "math/rand" + "net" "net/http" "os" "sort" + "strconv" "sync" "time" @@ -16,6 +19,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" + "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/kv" "github.com/grafana/dskit/netutil" @@ -34,6 +38,7 @@ type LifecyclerConfig struct { JoinAfter time.Duration `yaml:"join_after" category:"advanced"` MinReadyDuration time.Duration `yaml:"min_ready_duration" category:"advanced"` InfNames []string `yaml:"interface_names" doc:"default=[]"` + EnableInet6 bool `yaml:"enable_inet6" category:"advanced"` // FinalSleep's default value can be overridden by // setting it before calling RegisterFlags or RegisterFlagsWithPrefix. @@ -50,6 +55,10 @@ type LifecyclerConfig struct { // Injected internally ListenPort int `yaml:"-"` + + // If set, specifies the TokenGenerator implementation that will be used for generating tokens. + // Default value is nil, which means that RandomTokenGenerator is used. + RingTokenGenerator TokenGenerator `yaml:"-"` } // RegisterFlags adds the flags required to config this to the given FlagSet. 
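The lifecycler hunks that follow add an enable-inet6 flag and a Validate method. A hedged sketch of the new LifecyclerConfig surface; RingTokenGenerator is tagged yaml:"-", so it can only be injected programmatically, and tokenGen stands for any ring.TokenGenerator:

	import "github.com/grafana/dskit/ring"

	func buildLifecyclerConfig(tokenGen ring.TokenGenerator) (ring.LifecyclerConfig, error) {
		var cfg ring.LifecyclerConfig
		cfg.NumTokens = 512
		cfg.EnableInet6 = true // also consider addresses of IPv6 interfaces during autodetection
		cfg.RingTokenGenerator = tokenGen
		// Validate (added in the next hunk) rejects a tokens file path combined
		// with the spread-minimizing token strategy.
		if err := cfg.Validate(); err != nil {
			return ring.LifecyclerConfig{}, err
		}
		return cfg, nil
	}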
@@ -91,9 +100,30 @@ func (cfg *LifecyclerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.Flag f.StringVar(&cfg.Zone, prefix+"availability-zone", "", "The availability zone where this instance is running.") f.BoolVar(&cfg.UnregisterOnShutdown, prefix+"unregister-on-shutdown", true, "Unregister from the ring upon clean shutdown. It can be useful to disable for rolling restarts with consistent naming in conjunction with -distributor.extend-writes=false.") f.BoolVar(&cfg.ReadinessCheckRingHealth, prefix+"readiness-check-ring-health", true, "When enabled the readiness probe succeeds only after all instances are ACTIVE and healthy in the ring, otherwise only the instance itself is checked. This option should be disabled if in your cluster multiple instances can be rolled out simultaneously, otherwise rolling updates may be slowed down.") + f.BoolVar(&cfg.EnableInet6, prefix+"enable-inet6", false, "Enable IPv6 support. Required to make use of IP addresses from IPv6 interfaces.") +} + +// Validate checks the consistency of LifecyclerConfig, and fails if this cannot be achieved. +func (cfg *LifecyclerConfig) Validate() error { + _, ok := cfg.RingTokenGenerator.(*SpreadMinimizingTokenGenerator) + if ok { + // If cfg.RingTokenGenerator is a SpreadMinimizingTokenGenerator, we must ensure that + // the tokens are not loaded from file. + if cfg.TokensFilePath != "" { + return errors.New("you can't configure the tokens file path when using the spread minimizing token strategy. Please set the tokens file path to an empty string") + } + } + return nil } -// Lifecycler is responsible for managing the lifecycle of entries in the ring. +/* +Lifecycler is a Service that is responsible for publishing changes to a ring for a single instance. + + - When a Lifecycler first starts, it will be in a [PENDING] state. + - After the configured [ring.LifecyclerConfig.JoinAfter] period, it selects some random tokens and enters the [JOINING] state, creating or updating the ring as needed. + - The lifecycler will then periodically, based on the [ring.LifecyclerConfig.ObservePeriod], attempt to verify that its tokens have been added to the ring, after which it will transition to the [ACTIVE] state. + - The lifecycler will update the key/value store with heartbeats, state changes, and token changes, based on the [ring.LifecyclerConfig.HeartbeatPeriod]. +*/ type Lifecycler struct { *services.BasicService @@ -130,15 +160,22 @@ type Lifecycler struct { // Keeps stats updated at every heartbeat period countersLock sync.RWMutex healthyInstancesCount int + instancesCount int + instancesInZoneCount int zonesCount int + tokenGenerator TokenGenerator + // The maximum time allowed to wait on the CanJoin() condition. + // Configurable for testing purposes only. + canJoinTimeout time.Duration + lifecyclerMetrics *LifecyclerMetrics logger log.Logger } // NewLifecycler creates new Lifecycler. It must be started via StartAsync. 
func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer, ringName, ringKey string, flushOnShutdown bool, logger log.Logger, reg prometheus.Registerer) (*Lifecycler, error) { - addr, err := GetInstanceAddr(cfg.Addr, cfg.InfNames, logger) + addr, err := GetInstanceAddr(cfg.Addr, cfg.InfNames, logger, cfg.EnableInet6) if err != nil { return nil, err } @@ -161,11 +198,22 @@ func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer, ringNa flushTransferer = NewNoopFlushTransferer() } + tokenGenerator := cfg.RingTokenGenerator + if tokenGenerator == nil { + tokenGenerator = NewRandomTokenGenerator() + } + + // We validate cfg before we create a Lifecycler. + err = cfg.Validate() + if err != nil { + return nil, err + } + l := &Lifecycler{ cfg: cfg, flushTransferer: flushTransferer, KVStore: store, - Addr: fmt.Sprintf("%s:%d", addr, port), + Addr: net.JoinHostPort(addr, strconv.Itoa(port)), ID: cfg.ID, RingName: ringName, RingKey: ringKey, @@ -175,6 +223,8 @@ func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer, ringNa Zone: cfg.Zone, actorChan: make(chan func()), state: PENDING, + tokenGenerator: tokenGenerator, + canJoinTimeout: 5 * time.Minute, lifecyclerMetrics: NewLifecyclerMetrics(ringName, reg), logger: logger, } @@ -383,6 +433,23 @@ func (i *Lifecycler) HealthyInstancesCount() int { return i.healthyInstancesCount } +// InstancesCount returns the total number of instances in the ring, updated during the last heartbeat period. +func (i *Lifecycler) InstancesCount() int { + i.countersLock.RLock() + defer i.countersLock.RUnlock() + + return i.instancesCount +} + +// InstancesInZoneCount returns the number of instances in the ring that are registered in +// this lifecycler's zone, updated during the last heartbeat period. +func (i *Lifecycler) InstancesInZoneCount() int { + i.countersLock.RLock() + defer i.countersLock.RUnlock() + + return i.instancesInZoneCount +} + // ZonesCount returns the number of zones for which there's at least 1 instance registered // in the ring. func (i *Lifecycler) ZonesCount() int { @@ -529,7 +596,7 @@ heartbeatLoop: } // initRing is the first thing we do when we start. It: -// - add an ingester entry to the ring +// - adds an ingester entry to the ring // - copies out our state and tokens if they exist func (i *Lifecycler) initRing(ctx context.Context) error { var ( @@ -548,11 +615,7 @@ func (i *Lifecycler) initRing(ctx context.Context) error { } err = i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) { - if in == nil { - ringDesc = NewDesc() - } else { - ringDesc = in.(*Desc) - } + ringDesc = GetOrCreateRingDesc(in) instanceDesc, ok := ringDesc.Ingesters[i.ID] if !ok { @@ -593,23 +656,43 @@ func (i *Lifecycler) initRing(ctx context.Context) error { return ringDesc, true, nil } - // If the ingester failed to clean its ring entry up in can leave its state in LEAVING - // OR unregister_on_shutdown=false - // Move it into ACTIVE to ensure the ingester joins the ring. - if instanceDesc.State == LEAVING && len(instanceDesc.Tokens) == i.cfg.NumTokens { + tokens := Tokens(instanceDesc.Tokens) + level.Info(i.logger).Log("msg", "existing instance found in ring", "state", instanceDesc.State, "tokens", + len(tokens), "ring", i.RingName) + + // If the ingester fails to clean its ring entry up or unregister_on_shutdown=false, it can leave behind its + // ring state as LEAVING. Make sure to switch to the ACTIVE state. 
+ if instanceDesc.State == LEAVING { + delta := i.cfg.NumTokens - len(tokens) + if delta > 0 { + // We need more tokens + level.Info(i.logger).Log("msg", "existing instance has too few tokens, adding difference", + "current_tokens", len(tokens), "desired_tokens", i.cfg.NumTokens) + newTokens := i.tokenGenerator.GenerateTokens(delta, ringDesc.GetTokens()) + tokens = append(tokens, newTokens...) + sort.Sort(tokens) + } else if delta < 0 { + // We have too many tokens + level.Info(i.logger).Log("msg", "existing instance has too many tokens, removing difference", + "current_tokens", len(tokens), "desired_tokens", i.cfg.NumTokens) + // Make sure we don't pick the N smallest tokens, since that would increase the chance of the instance receiving only smaller hashes. + rand.Shuffle(len(tokens), tokens.Swap) + tokens = tokens[0:i.cfg.NumTokens] + sort.Sort(tokens) + } + instanceDesc.State = ACTIVE + instanceDesc.Tokens = tokens } - // We're taking over this entry, update instanceDesc with our values - instanceDesc.Addr = i.Addr - instanceDesc.Zone = i.Zone - - // We exist in the ring, so assume the ring is right and copy out tokens & state out of there. + // Set the local state based on the updated instance. i.setState(instanceDesc.State) - tokens, _ := ringDesc.TokensFor(i.ID) i.setTokens(tokens) - level.Info(i.logger).Log("msg", "existing entry found in ring", "state", i.GetState(), "tokens", len(tokens), "ring", i.RingName) + // We're taking over this entry, update instanceDesc with our values + instanceDesc.Id = i.ID + instanceDesc.Addr = i.Addr + instanceDesc.Zone = i.Zone // Update the ring if the instance has been changed. We don't want to rely on heartbeat update, as heartbeat // can be configured to long time, and until then lifecycler would not report this instance as ready in CheckReady. @@ -639,22 +722,17 @@ func (i *Lifecycler) verifyTokens(ctx context.Context) bool { result := false err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) { - var ringDesc *Desc - if in == nil { - ringDesc = NewDesc() - } else { - ringDesc = in.(*Desc) - } + ringDesc := GetOrCreateRingDesc(in) // At this point, we should have the same tokens as we have registered before ringTokens, takenTokens := ringDesc.TokensFor(i.ID) if !i.compareTokens(ringTokens) { - // uh, oh... our tokens are not our anymore. Let's try new ones. + // uh, oh... our tokens are not ours anymore. Let's try new ones. needTokens := i.cfg.NumTokens - len(ringTokens) level.Info(i.logger).Log("msg", "generating new tokens", "count", needTokens, "ring", i.RingName) - newTokens := GenerateTokens(needTokens, takenTokens) + newTokens := i.tokenGenerator.GenerateTokens(needTokens, takenTokens) ringTokens = append(ringTokens, newTokens...) 
sort.Sort(ringTokens) @@ -697,16 +775,62 @@ func (i *Lifecycler) compareTokens(fromRing Tokens) bool { return true } +func (i *Lifecycler) waitBeforeJoining(ctx context.Context) error { + if !i.tokenGenerator.CanJoinEnabled() { + return nil + } + + level.Info(i.logger).Log("msg", "waiting to be able to join the ring", "ring", i.RingName, "id", i.cfg.ID, "timeout", i.canJoinTimeout) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, i.canJoinTimeout) + defer cancel() + retries := backoff.New(ctxWithTimeout, backoff.Config{ + MinBackoff: 1 * time.Second, + MaxBackoff: 1 * time.Second, + MaxRetries: 0, + }) + + var lastError error + for ; retries.Ongoing(); retries.Wait() { + var desc interface{} + desc, lastError = i.KVStore.Get(ctxWithTimeout, i.RingKey) + if lastError != nil { + lastError = errors.Wrap(lastError, "error getting the ring from the KV store") + continue + } + + ringDesc, ok := desc.(*Desc) + if !ok || ringDesc == nil { + lastError = fmt.Errorf("no ring returned from the KV store") + continue + } + lastError = i.tokenGenerator.CanJoin(ringDesc.GetIngesters()) + if lastError == nil { + level.Info(i.logger).Log("msg", "it is now possible to join the ring", "ring", i.RingName, "id", i.cfg.ID, "retries", retries.NumRetries()) + return nil + } + } + + if lastError == nil { + lastError = retries.Err() + } + level.Warn(i.logger).Log("msg", "there was a problem while checking whether this instance could join the ring - will continue anyway", "ring", i.RingName, "id", i.cfg.ID, "err", lastError) + + // Return error only in case the parent context has been cancelled. + // In all other cases, we just want to swallow the error and move on. + return ctx.Err() +} + // autoJoin selects random tokens & moves state to targetState func (i *Lifecycler) autoJoin(ctx context.Context, targetState InstanceState) error { - var ringDesc *Desc + err := i.waitBeforeJoining(ctx) + if err != nil { + return err + } - err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) { - if in == nil { - ringDesc = NewDesc() - } else { - ringDesc = in.(*Desc) - } + var ringDesc *Desc + err = i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) { + ringDesc = GetOrCreateRingDesc(in) // At this point, we should not have any tokens, and we should be in PENDING state. myTokens, takenTokens := ringDesc.TokensFor(i.ID) @@ -714,7 +838,7 @@ func (i *Lifecycler) autoJoin(ctx context.Context, targetState InstanceState) er level.Error(i.logger).Log("msg", "tokens already exist for this instance - wasn't expecting any!", "num_tokens", len(myTokens), "ring", i.RingName) } - newTokens := GenerateTokens(i.cfg.NumTokens-len(myTokens), takenTokens) + newTokens := i.tokenGenerator.GenerateTokens(i.cfg.NumTokens-len(myTokens), takenTokens) i.setState(targetState) myTokens = append(myTokens, newTokens...) 
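The hunks above replace the recurring in == nil boilerplate with the exported GetOrCreateRingDesc helper (defined in model.go later in this diff). A sketch of the CAS pattern as the lifecycler uses it; the key, instance values and store are assumptions:

	import (
		"context"
		"time"

		"github.com/grafana/dskit/kv"
		"github.com/grafana/dskit/ring"
	)

	func registerExampleInstance(ctx context.Context, store kv.Client, tokens []uint32) error {
		return store.CAS(ctx, "collectors/example-ring", func(in interface{}) (out interface{}, retry bool, err error) {
			// GetOrCreateRingDesc returns an empty Desc when the key has never been written.
			desc := ring.GetOrCreateRingDesc(in)
			desc.AddIngester("instance-1", "10.0.0.1:9095", "zone-a", tokens, ring.ACTIVE, time.Now())
			// Returning retry=true lets the KV client re-run this callback on CAS conflicts.
			return desc, true, nil
		})
	}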
@@ -722,7 +846,6 @@ i.setTokens(myTokens) ringDesc.AddIngester(i.ID, i.Addr, i.Zone, i.getTokens(), i.GetState(), i.getRegisteredAt()) - return ringDesc, true, nil }) @@ -740,30 +863,23 @@ func (i *Lifecycler) updateConsul(ctx context.Context) error { var ringDesc *Desc err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) { - if in == nil { - ringDesc = NewDesc() - } else { - ringDesc = in.(*Desc) - } + ringDesc = GetOrCreateRingDesc(in) - instanceDesc, ok := ringDesc.Ingesters[i.ID] + var tokens Tokens + instanceDesc, exists := ringDesc.Ingesters[i.ID] - if !ok { + if !exists { // If the instance is missing in the ring, we need to add it back. However, due to how shuffle sharding works, // the missing instance for some period of time could have caused a resharding of tenants among instances: // to guarantee query correctness we need to update the registration timestamp to current time. level.Info(i.logger).Log("msg", "instance is missing in the ring (e.g. the ring backend storage has been reset), registering the instance with an updated registration timestamp", "ring", i.RingName) i.setRegisteredAt(time.Now()) - ringDesc.AddIngester(i.ID, i.Addr, i.Zone, i.getTokens(), i.GetState(), i.getRegisteredAt()) + tokens = i.getTokens() } else { - instanceDesc.Timestamp = time.Now().Unix() - instanceDesc.State = i.GetState() - instanceDesc.Addr = i.Addr - instanceDesc.Zone = i.Zone - instanceDesc.RegisteredTimestamp = i.getRegisteredAt().Unix() - ringDesc.Ingesters[i.ID] = instanceDesc + tokens = instanceDesc.Tokens } + ringDesc.AddIngester(i.ID, i.Addr, i.Zone, tokens, i.GetState(), i.getRegisteredAt()) return ringDesc, true, nil }) @@ -795,13 +911,15 @@ func (i *Lifecycler) changeState(ctx context.Context, state InstanceState) error func (i *Lifecycler) updateCounters(ringDesc *Desc) { healthyInstancesCount := 0 - zones := map[string]struct{}{} + instancesCount := 0 + zones := map[string]int{} if ringDesc != nil { now := time.Now() for _, ingester := range ringDesc.Ingesters { - zones[ingester.Zone] = struct{}{} + zones[ingester.Zone]++ + instancesCount++ // Count the number of healthy instances for Write operation. if ingester.IsHealthy(Write, i.cfg.RingConfig.HeartbeatTimeout, now) { @@ -813,6 +931,8 @@ func (i *Lifecycler) updateCounters(ringDesc *Desc) { // Update counters i.countersLock.Lock() i.healthyInstancesCount = healthyInstancesCount + i.instancesCount = instancesCount + i.instancesInZoneCount = zones[i.cfg.Zone] i.zonesCount = len(zones) i.countersLock.Unlock() } diff --git a/vendor/github.com/grafana/dskit/ring/model.go b/vendor/github.com/grafana/dskit/ring/model.go index fd58e534..956dbe0c 100644 --- a/vendor/github.com/grafana/dskit/ring/model.go +++ b/vendor/github.com/grafana/dskit/ring/model.go @@ -1,8 +1,8 @@ package ring import ( - "container/heap" "fmt" + "math" "sort" "sync" "time" @@ -11,6 +11,7 @@ import ( "github.com/grafana/dskit/kv/codec" "github.com/grafana/dskit/kv/memberlist" + "github.com/grafana/dskit/loser" ) // ByAddr is a sortable list of InstanceDesc.
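In the model.go hunks that follow, MergeTokens is rebuilt on a loser tree (a k-way merge structure from the new loser package) in place of container/heap. Its contract is unchanged: each input slice must already be sorted ascending. A tiny usage sketch:

	import (
		"fmt"

		"github.com/grafana/dskit/ring"
	)

	func mergeExample() {
		merged := ring.MergeTokens([][]uint32{
			{10, 300, 5000},
			{20, 40},
			nil, // empty inputs are fine
		})
		fmt.Println(merged) // [10 20 40 300 5000]
	}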
@@ -50,6 +51,7 @@ func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state Instanc } ingester := InstanceDesc{ + Id: id, Addr: addr, Timestamp: time.Now().Unix(), RegisteredTimestamp: registeredTimestamp, @@ -476,7 +478,7 @@ func (d *Desc) GetTokens() []uint32 { func (d *Desc) getTokensByZone() map[string][]uint32 { zones := map[string][][]uint32{} for _, instance := range d.Ingesters { - // Tokens may not be sorted for an older version which, so we enforce sorting here. + // Tokens may not be sorted for an older version, so we enforce sorting here. tokens := instance.Tokens if !sort.IsSorted(Tokens(tokens)) { sort.Sort(Tokens(tokens)) @@ -583,39 +585,20 @@ func (d *Desc) RingCompare(o *Desc) CompareResult { return EqualButStatesAndTimestamps } +// setInstanceIDs sets the ID of each InstanceDesc object managed by this Desc +func (d *Desc) setInstanceIDs() { + for id, inst := range d.Ingesters { + inst.Id = id + d.Ingesters[id] = inst + } +} + func GetOrCreateRingDesc(d interface{}) *Desc { if d == nil { return NewDesc() } - return d.(*Desc) -} - -// TokensHeap is an heap data structure used to merge multiple lists -// of sorted tokens into a single one. -type TokensHeap [][]uint32 - -func (h TokensHeap) Len() int { - return len(h) -} - -func (h TokensHeap) Swap(i, j int) { - h[i], h[j] = h[j], h[i] -} - -func (h TokensHeap) Less(i, j int) bool { - return h[i][0] < h[j][0] -} - -func (h *TokensHeap) Push(x interface{}) { - *h = append(*h, x.([]uint32)) -} -func (h *TokensHeap) Pop() interface{} { - old := *h - n := len(old) - x := old[n-1] - *h = old[0 : n-1] - return x + return d.(*Desc) } // MergeTokens takes in input multiple lists of tokens and returns a single list @@ -624,34 +607,15 @@ func (h *TokensHeap) Pop() interface{} { func MergeTokens(instances [][]uint32) []uint32 { numTokens := 0 - // Build the heap. - h := make(TokensHeap, 0, len(instances)) for _, tokens := range instances { - if len(tokens) == 0 { - continue - } - - // We can safely append the input slice because elements inside are never shuffled. - h = append(h, tokens) numTokens += len(tokens) } - heap.Init(&h) + tree := loser.New(instances, math.MaxUint32) out := make([]uint32, 0, numTokens) - for h.Len() > 0 { - // The minimum element in the tree is the root, at index 0. - lowest := h[0] - out = append(out, lowest[0]) - - if len(lowest) > 1 { - // Remove the first token from the lowest because we popped it - // and then fix the heap to keep it sorted. 
- h[0] = h[0][1:] - heap.Fix(&h, 0) - } else { - heap.Remove(&h, 0) - } + for tree.Next() { + out = append(out, tree.Winner()) } return out diff --git a/vendor/github.com/grafana/dskit/ring/ownership_priority_queue.go b/vendor/github.com/grafana/dskit/ring/ownership_priority_queue.go new file mode 100644 index 00000000..7c61da02 --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/ownership_priority_queue.go @@ -0,0 +1,136 @@ +package ring + +import ( + "fmt" + "math" + "strings" +) + +type ringItem interface { + key() int + String() string +} + +type ringInstance struct { + instanceID int +} + +func (ri ringInstance) key() int { + return ri.instanceID +} + +func (ri ringInstance) String() string { + return fmt.Sprintf("[instanceID: %d]", ri.instanceID) +} + +type ringToken struct { + token uint32 + prevToken uint32 +} + +func (rt ringToken) key() int { + return int(rt.token) +} + +func (rt ringToken) String() string { + return fmt.Sprintf("[token: %d, prevToken: %d]", rt.token, rt.prevToken) +} + +type ownershipInfo[T ringItem] struct { + item T + ownership float64 +} + +func newRingTokenOwnershipInfo(token, prevToken uint32) ownershipInfo[ringToken] { + ownership := float64(tokenDistance(prevToken, token)) + return ownershipInfo[ringToken]{ + ownership: ownership, + item: ringToken{ + token: token, + prevToken: prevToken, + }, + } +} + +func newRingInstanceOwnershipInfo(instanceID int, ownership float64) ownershipInfo[ringInstance] { + return ownershipInfo[ringInstance]{ + ownership: ownership, + item: ringInstance{ + instanceID: instanceID, + }, + } +} + +// ownershipPriorityQueue is a max-heap, i.e., a priority queue +// where items with a higher priority will be extracted first. +// Namely, items with a higher ownership have a higher priority. +// In order to guarantee that 2 instances of ownershipPriorityQueue +// with the same items always assign equal priorities to equal items, +// in the case of items with equal ownership, we rely on the +// order of item ids. +type ownershipPriorityQueue[T ringItem] struct { + items []ownershipInfo[T] +} + +func newPriorityQueue[T ringItem](capacity int) ownershipPriorityQueue[T] { + return ownershipPriorityQueue[T]{ + items: make([]ownershipInfo[T], 0, capacity), + } +} + +func (pq *ownershipPriorityQueue[T]) Len() int { + return len(pq.items) +} + +func (pq *ownershipPriorityQueue[T]) Swap(i, j int) { + pq.items[i], pq.items[j] = pq.items[j], pq.items[i] +} + +func (pq *ownershipPriorityQueue[T]) Less(i, j int) bool { + if pq.items[i].ownership == pq.items[j].ownership { + // In order to guarantee the stability, i.e., that the same instanceID and zone as input + // always generate the same slice of tokens as output, we enforce that by equal ownership + // higher priority is determined by the order of ids. + return pq.items[i].item.key() > pq.items[j].item.key() + } + // We are implementing a max-heap, so we are using > here. + // Since we compare float64, NaN values must be placed at the end. + return pq.items[i].ownership > pq.items[j].ownership || (math.IsNaN(pq.items[j].ownership) && !math.IsNaN(pq.items[i].ownership)) +} + +// Push implements heap.Push(any). It pushes the element item onto ownershipPriorityQueue. +func (pq *ownershipPriorityQueue[T]) Push(item any) { + ownershipInfo := item.(ownershipInfo[T]) + pq.items = append(pq.items, ownershipInfo) +} + +// Pop implements heap.Pop(). It removes and returns the element with the highest priority from ownershipPriorityQueue. 
+func (pq *ownershipPriorityQueue[T]) Pop() any { + n := len(pq.items) + item := pq.items[n-1] + pq.items = pq.items[0 : n-1] + return item +} + +// Peek returns the element with the highest priority from ownershipPriorityQueue, +// but it does not remove it from the latter. Time complexity is O(1). +func (pq *ownershipPriorityQueue[T]) Peek() *ownershipInfo[T] { + if len(pq.items) == 0 { + return nil + } + return &pq.items[0] +} + +func (pq *ownershipPriorityQueue[T]) String() string { + return fmt.Sprintf("[%s]", strings.Join(mapItems(pq.items, func(item ownershipInfo[T]) string { + return fmt.Sprintf("%s-ownership: %.3f", item.item, item.ownership) + }), ",")) +} + +func mapItems[T, V any](in []T, mapItem func(T) V) []V { + out := make([]V, len(in)) + for i, v := range in { + out[i] = mapItem(v) + } + return out +} diff --git a/vendor/github.com/grafana/dskit/ring/replication_set.go b/vendor/github.com/grafana/dskit/ring/replication_set.go index b7322713..f389f476 100644 --- a/vendor/github.com/grafana/dskit/ring/replication_set.go +++ b/vendor/github.com/grafana/dskit/ring/replication_set.go @@ -2,8 +2,16 @@ package ring import ( "context" + "errors" + "fmt" "sort" "time" + + kitlog "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/opentracing/opentracing-go/ext" + + "github.com/grafana/dskit/spanlogger" ) // ReplicationSet describes the instances to talk to for a given key, and how @@ -18,28 +26,24 @@ type ReplicationSet struct { // Maximum number of different zones in which instances can fail. Max unavailable zones and // max errors are mutually exclusive. MaxUnavailableZones int + + ZoneAwarenessEnabled bool } // Do function f in parallel for all replicas in the set, erroring if we exceed // MaxErrors and returning early otherwise. // Return a slice of all results from f, or nil if an error occurred. func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(context.Context, *InstanceDesc) (interface{}, error)) ([]interface{}, error) { - type instanceResult struct { - res interface{} - err error - instance *InstanceDesc - } - // Initialise the result tracker, which is used to keep track of successes and failures. var tracker replicationSetResultTracker if r.MaxUnavailableZones > 0 { - tracker = newZoneAwareResultTracker(r.Instances, r.MaxUnavailableZones) + tracker = newZoneAwareResultTracker(r.Instances, r.MaxUnavailableZones, kitlog.NewNopLogger()) } else { - tracker = newDefaultResultTracker(r.Instances, r.MaxErrors) + tracker = newDefaultResultTracker(r.Instances, r.MaxErrors, kitlog.NewNopLogger()) } var ( - ch = make(chan instanceResult, len(r.Instances)) + ch = make(chan instanceResult[any], len(r.Instances)) forceStart = make(chan struct{}, r.MaxErrors) ) ctx, cancel := context.WithCancel(ctx) @@ -60,8 +64,8 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(cont } } result, err := f(ctx, ing) - ch <- instanceResult{ - res: result, + ch <- instanceResult[any]{ + result: result, err: err, instance: ing, } @@ -84,17 +88,294 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(cont forceStart <- struct{}{} } } else { - results = append(results, res.res) + results = append(results, res.result) + } + + case <-ctx.Done(): + return nil, ctx.Err() + } + } + + return results, nil +} + +type DoUntilQuorumConfig struct { + // MinimizeRequests enables request minimization. + // See docs for DoUntilQuorum for more information.
+ MinimizeRequests bool + + // HedgingDelay configures the delay used before initiating hedged requests. + // Hedging is only enabled if HedgingDelay is non-zero and MinimizeRequests is true. + // See docs for DoUntilQuorum for more information. + HedgingDelay time.Duration + + // Logger to emit log lines and span events to during the call. + // Can be nil, in which case no log lines or span events are emitted. + Logger *spanlogger.SpanLogger + + // IsTerminalError allows DoUntilQuorum to detect terminal errors generated by requests. + // + // If IsTerminalError is non-nil and a request returns an error for which IsTerminalError returns true, + // DoUntilQuorum will immediately cancel any inflight requests and return the error. + // + // This is useful to cancel DoUntilQuorum when an unrecoverable error occurs and it does not + // make sense to attempt requests to other instances. For example, if a client-side limit on the + // total response size across all instances is reached, making further requests to other + // instances would not be worthwhile. + IsTerminalError func(error) bool +} + +func (c DoUntilQuorumConfig) Validate() error { + if c.HedgingDelay < 0 { + return errors.New("invalid DoUntilQuorumConfig: HedgingDelay must be non-negative") + } + + return nil +} + +// DoUntilQuorum runs function f in parallel for all replicas in r. +// +// # Result selection +// +// If r.MaxUnavailableZones is greater than zero, or r.ZoneAwarenessEnabled is true, DoUntilQuorum operates in zone-aware mode: +// - DoUntilQuorum returns an error if calls to f for instances in more than r.MaxUnavailableZones zones return errors +// - Otherwise, DoUntilQuorum returns all results from all replicas in the first zones for which f succeeds +// for every instance in that zone (eg. if there are 3 zones and r.MaxUnavailableZones is 1, DoUntilQuorum will +// return the results from all instances in 2 zones, even if all calls to f succeed). +// +// Otherwise, DoUntilQuorum operates in non-zone-aware mode: +// - DoUntilQuorum returns an error if more than r.MaxErrors calls to f return errors +// - Otherwise, DoUntilQuorum returns all results from the first len(r.Instances) - r.MaxErrors instances +// (eg. if there are 6 replicas and r.MaxErrors is 2, DoUntilQuorum will return the results from the first 4 +// successful calls to f, even if all 6 calls to f succeed). +// +// # Request minimization +// +// cfg.MinimizeRequests enables or disables request minimization. +// +// Regardless of the value of cfg.MinimizeRequests, if one of the termination conditions above is satisfied or ctx is +// cancelled before f is called for an instance, f may not be called for that instance at all. +// +// ## When disabled +// +// If request minimization is disabled, DoUntilQuorum will call f for each instance in r. The value of cfg.HedgingDelay +// is ignored. +// +// ## When enabled +// +// If request minimization is enabled, DoUntilQuorum will initially call f for the minimum number of instances needed to +// reach the termination conditions above, and later call f for further instances if required. For example, if +// r.MaxUnavailableZones is 1 and there are three zones, DoUntilQuorum will initially only call f for instances in two +// zones, and only call f for instances in the remaining zone if a request in the initial two zones fails. 
+// +// DoUntilQuorum will randomly select available zones / instances such that calling DoUntilQuorum multiple times with +// the same ReplicationSet should evenly distribute requests across all zones / instances. +// +// If cfg.HedgingDelay is non-zero, DoUntilQuorum will call f for an additional zone's instances (if zone-aware) / an +// additional instance (if not zone-aware) every cfg.HedgingDelay until one of the termination conditions above is +// reached. For example, if r.MaxUnavailableZones is 2, cfg.HedgingDelay is 4 seconds and there are five zones, +// DoUntilQuorum will initially only call f for instances in three zones, and unless one of the termination conditions +// is reached earlier, will then call f for instances in a fourth zone approximately 4 seconds later, and then call f +// for instances in the final zone approximately 4 seconds after that (ie. roughly 8 seconds since the call to +// DoUntilQuorum began). +// +// # Cleanup +// +// Any results from successful calls to f that are not returned by DoUntilQuorum will be passed to cleanupFunc, +// including when DoUntilQuorum returns an error or only returns a subset of successful results. cleanupFunc may +// be called both before and after DoUntilQuorum returns. +// +// A call to f is considered successful if it returns a nil error. +// +// # Contexts +// +// The context.Context passed to an invocation of f may be cancelled at any time if the result of that invocation of +// f will not be used. +// +// DoUntilQuorum cancels the context.Context passed to each invocation of f before DoUntilQuorum returns. +func DoUntilQuorum[T any](ctx context.Context, r ReplicationSet, cfg DoUntilQuorumConfig, f func(context.Context, *InstanceDesc) (T, error), cleanupFunc func(T)) ([]T, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + wrappedF := func(ctx context.Context, desc *InstanceDesc, _ context.CancelFunc) (T, error) { + return f(ctx, desc) + } + + return DoUntilQuorumWithoutSuccessfulContextCancellation(ctx, r, cfg, wrappedF, cleanupFunc) +} + +// DoUntilQuorumWithoutSuccessfulContextCancellation behaves the same as DoUntilQuorum, except it does not cancel +// the context.Context passed to invocations of f whose results are returned. +// +// For example, this is useful in situations where DoUntilQuorumWithoutSuccessfulContextCancellation is used +// to establish a set of streams that will be used after DoUntilQuorumWithoutSuccessfulContextCancellation returns. +// +// It is the caller's responsibility to ensure that either of the following are eventually true: +// - ctx is cancelled, or +// - the corresponding context.CancelFunc is called for all invocations of f whose results are returned by +// DoUntilQuorumWithoutSuccessfulContextCancellation +// +// Failing to do this may result in a memory leak.
+func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Context, r ReplicationSet, cfg DoUntilQuorumConfig, f func(context.Context, *InstanceDesc, context.CancelFunc) (T, error), cleanupFunc func(T)) ([]T, error) { + if err := cfg.Validate(); err != nil { + return nil, err + } + + if r.ZoneAwarenessEnabled && r.MaxErrors > 0 { + return nil, fmt.Errorf("invalid ReplicationSet: MaxErrors is non-zero (is %v) and ZoneAwarenessEnabled is true", r.MaxErrors) + } + + var logger kitlog.Logger = cfg.Logger + if cfg.Logger == nil { + logger = kitlog.NewNopLogger() + } + + resultsChan := make(chan instanceResult[T], len(r.Instances)) + resultsRemaining := len(r.Instances) + + defer func() { + go func() { + for resultsRemaining > 0 { + result := <-resultsChan + resultsRemaining-- + + if result.err == nil { + cleanupFunc(result.result) + } + } + }() + }() + + var resultTracker replicationSetResultTracker + var contextTracker replicationSetContextTracker + if r.MaxUnavailableZones > 0 || r.ZoneAwarenessEnabled { + resultTracker = newZoneAwareResultTracker(r.Instances, r.MaxUnavailableZones, logger) + contextTracker = newZoneAwareContextTracker(ctx, r.Instances) + } else { + resultTracker = newDefaultResultTracker(r.Instances, r.MaxErrors, logger) + contextTracker = newDefaultContextTracker(ctx, r.Instances) + } + + if cfg.MinimizeRequests { + resultTracker.startMinimumRequests() + } else { + resultTracker.startAllRequests() + } + + for i := range r.Instances { + instance := &r.Instances[i] + ctx, ctxCancel := contextTracker.contextFor(instance) + + go func(desc *InstanceDesc) { + if err := resultTracker.awaitStart(ctx, desc); err != nil { + // Post to resultsChan so that the deferred cleanup handler above eventually terminates. + resultsChan <- instanceResult[T]{ + err: err, + instance: desc, + } + + return + } + + result, err := f(ctx, desc, ctxCancel) + resultsChan <- instanceResult[T]{ + result: result, + err: err, + instance: desc, + } + }(instance) + } + + resultsMap := make(map[*InstanceDesc]T, len(r.Instances)) + cleanupResultsAlreadyReceived := func() { + for _, result := range resultsMap { + cleanupFunc(result) + } + } + + terminate := func(err error) ([]T, error) { + if cfg.Logger != nil { + ext.Error.Set(cfg.Logger.Span, true) + } + + contextTracker.cancelAllContexts() + cleanupResultsAlreadyReceived() + return nil, err + } + + var hedgingTrigger <-chan time.Time + + if cfg.HedgingDelay > 0 { + ticker := time.NewTicker(cfg.HedgingDelay) + defer ticker.Stop() + hedgingTrigger = ticker.C + } + + for !resultTracker.succeeded() { + select { + case <-ctx.Done(): + level.Debug(logger).Log("msg", "parent context done, returning", "err", ctx.Err()) + + // No need to cancel individual instance contexts, as they inherit the cancellation from ctx. + cleanupResultsAlreadyReceived() + return nil, ctx.Err() + case <-hedgingTrigger: + resultTracker.startAdditionalRequests() + case result := <-resultsChan: + resultsRemaining-- + + if result.err != nil && cfg.IsTerminalError != nil && cfg.IsTerminalError(result.err) { + level.Warn(logger).Log("msg", "cancelling all outstanding requests because a terminal error occurred", "err", result.err) + // We must return before calling resultTracker.done() below, otherwise done() might start further requests if request minimization is enabled.
+ return terminate(result.err) + } + + resultTracker.done(result.instance, result.err) + + if result.err == nil { + resultsMap[result.instance] = result.result + } else { + contextTracker.cancelContextFor(result.instance) + + if resultTracker.failed() { + level.Error(logger).Log("msg", "cancelling all requests because quorum cannot be reached") + return terminate(result.err) + } + } + } + } + + level.Debug(logger).Log("msg", "quorum reached") + + results := make([]T, 0, len(r.Instances)) + + for i := range r.Instances { + instance := &r.Instances[i] + result, haveResult := resultsMap[instance] + + if haveResult { + if resultTracker.shouldIncludeResultFrom(instance) { + results = append(results, result) + } else { + contextTracker.cancelContextFor(instance) + cleanupFunc(result) + } + } else { + // Nothing to clean up (yet) - this will be handled by deferred call above. + contextTracker.cancelContextFor(instance) + } + } + + return results, nil +} + +type instanceResult[T any] struct { + result T + err error + instance *InstanceDesc +} + // Includes returns whether the replication set includes the replica with the provided addr. func (r ReplicationSet) Includes(addr string) bool { for _, instance := range r.Instances { @@ -128,17 +409,40 @@ func (r ReplicationSet) GetAddressesWithout(exclude string) []string { return addrs } -// HasReplicationSetChanged returns true if two replications sets are the same (with possibly different timestamps), -// false if they differ in any way (number of instances, instance states, tokens, zones, ...). +// ZoneCount returns the number of unique zones represented by instances within this replication set. +func (r *ReplicationSet) ZoneCount() int { + // Why not use a map here? Using a slice is faster for the small number of zones we expect to typically use. + zones := []string{} + + for _, i := range r.Instances { + sawZone := false + + for _, z := range zones { + if z == i.Zone { + sawZone = true + break + } + } + + if !sawZone { + zones = append(zones, i.Zone) + } + } + + return len(zones) +} + +// HasReplicationSetChanged returns false if two replication sets are the same (with possibly different timestamps), +// true if they differ in any way (number of instances, instance states, tokens, zones, ...). func HasReplicationSetChanged(before, after ReplicationSet) bool { return hasReplicationSetChangedExcluding(before, after, func(i *InstanceDesc) { i.Timestamp = 0 }) } -// HasReplicationSetChangedWithoutState returns true if two replications sets // are the same (with possibly different timestamps and instance states), -// false if they differ in any other way (number of instances, tokens, zones, ...). +// HasReplicationSetChangedWithoutState returns false if two replication sets // are the same (with possibly different timestamps and instance states), +// true if they differ in any other way (number of instances, tokens, zones, ...).
func HasReplicationSetChangedWithoutState(before, after ReplicationSet) bool { return hasReplicationSetChangedExcluding(before, after, func(i *InstanceDesc) { i.Timestamp = 0 diff --git a/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go b/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go index fcdf5441..d74a3e2a 100644 --- a/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go +++ b/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go @@ -1,5 +1,15 @@ package ring +import ( + "context" + "errors" + "math/rand" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "go.uber.org/atomic" +) + type replicationSetResultTracker interface { // Signals an instance has done the execution, either successful (no error) // or failed (with error). @@ -10,29 +20,101 @@ type replicationSetResultTracker interface { // Returns true if the maximum number of failed executions have been reached. failed() bool + + // Returns true if the result returned by instance is part of the minimal set of all results + // required to meet the quorum requirements of this tracker. + // This method should only be called for instances that have returned a successful result, + // calling this method for an instance that returned an error may return unpredictable results. + // This method should only be called after succeeded returns true for the first time and before + // calling done any further times. + shouldIncludeResultFrom(instance *InstanceDesc) bool + + // Starts an initial set of requests sufficient to meet the quorum requirements of this tracker. + // Further requests will be started if necessary when done is called with a non-nil error. + // Calling this method multiple times may lead to unpredictable behaviour. + // Calling both this method and startAllRequests may lead to unpredictable behaviour. + // This method must only be called before calling done. + startMinimumRequests() + + // Starts additional request(s) as defined by the quorum requirements of this tracker. + // For example, a zone-aware tracker would start requests for another zone, whereas a + // non-zone-aware tracker would start a request for another instance. + // This method must only be called after calling startMinimumRequests or startAllRequests. + // If requests for all instances have already been started, this method does nothing. + // This method must only be called before calling done. + startAdditionalRequests() + + // Starts requests for all instances. + // Calling this method multiple times may lead to unpredictable behaviour. + // Calling both this method and startMinimumRequests may lead to unpredictable behaviour. + // This method must only be called before calling done. + startAllRequests() + + // Blocks until the request for this instance should be started. + // Returns nil if the request should be started, or a non-nil error if the request is not required + // or ctx has been cancelled. + // Must only be called after startMinimumRequests or startAllRequests returns. + // Calling this method multiple times for the same instance may lead to unpredictable behaviour. + awaitStart(ctx context.Context, instance *InstanceDesc) error +} + +type replicationSetContextTracker interface { + // Returns a context.Context and context.CancelFunc for instance. + // The context.CancelFunc will only cancel the context for this instance (ie. if this tracker + // is zone-aware, calling the context.CancelFunc should not cancel contexts for other instances + // in the same zone).
+ contextFor(instance *InstanceDesc) (context.Context, context.CancelFunc) + + // Cancels the context for instance previously obtained with contextFor. + // This method may cancel the context for other instances if those other instances are part of + // the same zone and this tracker is zone-aware. + cancelContextFor(instance *InstanceDesc) + + // Cancels all contexts previously obtained with contextFor. + cancelAllContexts() } +var errResultNotNeeded = errors.New("result from this instance is not needed") + type defaultResultTracker struct { - minSucceeded int - numSucceeded int - numErrors int - maxErrors int + minSucceeded int + numSucceeded int + numErrors int + maxErrors int + instances []InstanceDesc + instanceRelease map[*InstanceDesc]chan struct{} + pendingInstances []*InstanceDesc + logger log.Logger } -func newDefaultResultTracker(instances []InstanceDesc, maxErrors int) *defaultResultTracker { +func newDefaultResultTracker(instances []InstanceDesc, maxErrors int, logger log.Logger) *defaultResultTracker { return &defaultResultTracker{ minSucceeded: len(instances) - maxErrors, numSucceeded: 0, numErrors: 0, maxErrors: maxErrors, + instances: instances, + logger: logger, } } -func (t *defaultResultTracker) done(_ *InstanceDesc, err error) { +func (t *defaultResultTracker) done(instance *InstanceDesc, err error) { if err == nil { t.numSucceeded++ + + if t.succeeded() { + t.onSucceeded() + } } else { + level.Warn(t.logger).Log( + "msg", "instance failed", + "instanceAddr", instance.Addr, + "instanceID", instance.Id, + "err", err, + ) + t.numErrors++ + t.startAdditionalRequestsDueTo("failure of other instance") } } @@ -40,10 +122,122 @@ func (t *defaultResultTracker) succeeded() bool { return t.numSucceeded >= t.minSucceeded } +func (t *defaultResultTracker) onSucceeded() { + // We don't need any of the requests that are waiting to be released. Signal that they should abort. + for _, i := range t.pendingInstances { + close(t.instanceRelease[i]) + } + + t.pendingInstances = nil +} + func (t *defaultResultTracker) failed() bool { return t.numErrors > t.maxErrors } +func (t *defaultResultTracker) shouldIncludeResultFrom(_ *InstanceDesc) bool { + return true +} + +func (t *defaultResultTracker) startMinimumRequests() { + t.instanceRelease = make(map[*InstanceDesc]chan struct{}, len(t.instances)) + + for i := range t.instances { + instance := &t.instances[i] + t.instanceRelease[instance] = make(chan struct{}, 1) + } + + releaseOrder := rand.Perm(len(t.instances)) + t.pendingInstances = make([]*InstanceDesc, 0, t.maxErrors) + + for _, instanceIdx := range releaseOrder { + instance := &t.instances[instanceIdx] + + if len(t.pendingInstances) < t.maxErrors { + t.pendingInstances = append(t.pendingInstances, instance) + } else { + level.Debug(t.logger).Log("msg", "starting request to instance", "reason", "initial requests", "instanceAddr", instance.Addr, "instanceID", instance.Id) + t.instanceRelease[instance] <- struct{}{} + } + } + + // If we've already succeeded (which should only happen if the replica set is misconfigured with MaxErrors >= the number of instances), + // then make sure we don't block requests forever. + if t.succeeded() { + t.onSucceeded() + } +} + +func (t *defaultResultTracker) startAdditionalRequests() { + t.startAdditionalRequestsDueTo("hedging") +} + +func (t *defaultResultTracker) startAdditionalRequestsDueTo(reason string) { + if len(t.pendingInstances) > 0 { + // There are some outstanding requests we could make before we reach maxErrors. Release the next one. 
+ i := t.pendingInstances[0] + level.Debug(t.logger).Log("msg", "starting request to instance", "reason", reason, "instanceAddr", i.Addr, "instanceID", i.Id) + t.instanceRelease[i] <- struct{}{} + t.pendingInstances = t.pendingInstances[1:] + } +} + +func (t *defaultResultTracker) startAllRequests() { + t.instanceRelease = make(map[*InstanceDesc]chan struct{}, len(t.instances)) + + for i := range t.instances { + instance := &t.instances[i] + level.Debug(t.logger).Log("msg", "starting request to instance", "reason", "initial requests", "instanceAddr", instance.Addr, "instanceID", instance.Id) + t.instanceRelease[instance] = make(chan struct{}, 1) + t.instanceRelease[instance] <- struct{}{} + } +} + +func (t *defaultResultTracker) awaitStart(ctx context.Context, instance *InstanceDesc) error { + select { + case <-ctx.Done(): + return ctx.Err() + case _, ok := <-t.instanceRelease[instance]: + if ok { + return nil + } + + return errResultNotNeeded + } +} + +type defaultContextTracker struct { + ctx context.Context + cancelFuncs map[*InstanceDesc]context.CancelFunc +} + +func newDefaultContextTracker(ctx context.Context, instances []InstanceDesc) *defaultContextTracker { + return &defaultContextTracker{ + ctx: ctx, + cancelFuncs: make(map[*InstanceDesc]context.CancelFunc, len(instances)), + } +} + +func (t *defaultContextTracker) contextFor(instance *InstanceDesc) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(t.ctx) + t.cancelFuncs[instance] = cancel + return ctx, cancel +} + +func (t *defaultContextTracker) cancelContextFor(instance *InstanceDesc) { + if cancel, ok := t.cancelFuncs[instance]; ok { + cancel() + delete(t.cancelFuncs, instance) + } +} + +func (t *defaultContextTracker) cancelAllContexts() { + for instance, cancel := range t.cancelFuncs { + cancel() + delete(t.cancelFuncs, instance) + } +} + // zoneAwareResultTracker tracks the results per zone. // All instances in a zone must succeed in order for the zone to succeed. type zoneAwareResultTracker struct { @@ -51,28 +245,55 @@ type zoneAwareResultTracker struct { failuresByZone map[string]int minSuccessfulZones int maxUnavailableZones int + zoneRelease map[string]chan struct{} + zoneShouldStart map[string]*atomic.Bool + pendingZones []string + logger log.Logger } -func newZoneAwareResultTracker(instances []InstanceDesc, maxUnavailableZones int) *zoneAwareResultTracker { +func newZoneAwareResultTracker(instances []InstanceDesc, maxUnavailableZones int, logger log.Logger) *zoneAwareResultTracker { t := &zoneAwareResultTracker{ waitingByZone: make(map[string]int), failuresByZone: make(map[string]int), maxUnavailableZones: maxUnavailableZones, + logger: logger, } for _, instance := range instances { t.waitingByZone[instance.Zone]++ } + t.minSuccessfulZones = len(t.waitingByZone) - maxUnavailableZones + if t.minSuccessfulZones < 0 { + t.minSuccessfulZones = 0 + } + return t } func (t *zoneAwareResultTracker) done(instance *InstanceDesc, err error) { t.waitingByZone[instance.Zone]-- - if err != nil { + if err == nil { + if t.succeeded() { + t.onSucceeded() + } + } else { t.failuresByZone[instance.Zone]++ + + if t.failuresByZone[instance.Zone] == 1 { + level.Warn(t.logger).Log( + "msg", "request to instance has failed, zone cannot contribute to quorum", + "zone", instance.Zone, + "failingInstanceAddr", instance.Addr, + "failingInstanceID", instance.Id, + "err", err, + ) + + // If this was the first failure for this zone, release another zone's requests and signal they should start. 
+ t.startAdditionalRequestsDueTo("failure of other zone") + } } } @@ -90,7 +311,143 @@ func (t *zoneAwareResultTracker) succeeded() bool { return successfulZones >= t.minSuccessfulZones } +func (t *zoneAwareResultTracker) onSucceeded() { + // We don't need any of the requests that are waiting to be released. Signal that they should abort. + for _, zone := range t.pendingZones { + t.releaseZone(zone, false) + } + + t.pendingZones = nil +} + func (t *zoneAwareResultTracker) failed() bool { failedZones := len(t.failuresByZone) return failedZones > t.maxUnavailableZones } + +func (t *zoneAwareResultTracker) shouldIncludeResultFrom(instance *InstanceDesc) bool { + return t.failuresByZone[instance.Zone] == 0 && t.waitingByZone[instance.Zone] == 0 +} + +func (t *zoneAwareResultTracker) startMinimumRequests() { + t.createReleaseChannels() + + allZones := make([]string, 0, len(t.waitingByZone)) + + for zone := range t.waitingByZone { + allZones = append(allZones, zone) + } + + rand.Shuffle(len(allZones), func(i, j int) { + allZones[i], allZones[j] = allZones[j], allZones[i] + }) + + for i := 0; i < t.minSuccessfulZones; i++ { + level.Debug(t.logger).Log("msg", "starting requests to zone", "reason", "initial requests", "zone", allZones[i]) + t.releaseZone(allZones[i], true) + } + + t.pendingZones = allZones[t.minSuccessfulZones:] + + // If we've already succeeded (which should only happen if the replica set is misconfigured with MaxUnavailableZones >= the number of zones), + // then make sure we don't block requests forever. + if t.succeeded() { + t.onSucceeded() + } +} + +func (t *zoneAwareResultTracker) startAdditionalRequests() { + t.startAdditionalRequestsDueTo("hedging") +} + +func (t *zoneAwareResultTracker) startAdditionalRequestsDueTo(reason string) { + if len(t.pendingZones) > 0 { + // If there are more zones we could try before reaching maxUnavailableZones, release another zone's requests and signal they should start. 
+ level.Debug(t.logger).Log("msg", "starting requests to zone", "reason", reason, "zone", t.pendingZones[0]) + t.releaseZone(t.pendingZones[0], true) + t.pendingZones = t.pendingZones[1:] + } +} + +func (t *zoneAwareResultTracker) startAllRequests() { + t.createReleaseChannels() + + for zone := range t.waitingByZone { + level.Debug(t.logger).Log("msg", "starting requests to zone", "reason", "initial requests", "zone", zone) + t.releaseZone(zone, true) + } +} + +func (t *zoneAwareResultTracker) createReleaseChannels() { + t.zoneRelease = make(map[string]chan struct{}, len(t.waitingByZone)) + t.zoneShouldStart = make(map[string]*atomic.Bool, len(t.waitingByZone)) + + for zone := range t.waitingByZone { + t.zoneRelease[zone] = make(chan struct{}) + t.zoneShouldStart[zone] = atomic.NewBool(false) + } +} + +func (t *zoneAwareResultTracker) releaseZone(zone string, shouldStart bool) { + t.zoneShouldStart[zone].Store(shouldStart) + close(t.zoneRelease[zone]) +} + +func (t *zoneAwareResultTracker) awaitStart(ctx context.Context, instance *InstanceDesc) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.zoneRelease[instance.Zone]: + if t.zoneShouldStart[instance.Zone].Load() { + return nil + } + + return errResultNotNeeded + } +} + +type zoneAwareContextTracker struct { + contexts map[*InstanceDesc]context.Context + cancelFuncs map[*InstanceDesc]context.CancelFunc +} + +func newZoneAwareContextTracker(ctx context.Context, instances []InstanceDesc) *zoneAwareContextTracker { + t := &zoneAwareContextTracker{ + contexts: make(map[*InstanceDesc]context.Context, len(instances)), + cancelFuncs: make(map[*InstanceDesc]context.CancelFunc, len(instances)), + } + + for i := range instances { + instance := &instances[i] + ctx, cancel := context.WithCancel(ctx) + t.contexts[instance] = ctx + t.cancelFuncs[instance] = cancel + } + + return t +} + +func (t *zoneAwareContextTracker) contextFor(instance *InstanceDesc) (context.Context, context.CancelFunc) { + return t.contexts[instance], t.cancelFuncs[instance] +} + +func (t *zoneAwareContextTracker) cancelContextFor(instance *InstanceDesc) { + // Why not create a per-zone parent context to make this easier? + // If we create a per-zone parent context, we'd need to have some way to cancel the per-zone context when the last of the individual + // contexts in a zone is cancelled using the context.CancelFunc returned from contextFor. + for i, cancel := range t.cancelFuncs { + if i.Zone == instance.Zone { + cancel() + delete(t.contexts, i) + delete(t.cancelFuncs, i) + } + } +} + +func (t *zoneAwareContextTracker) cancelAllContexts() { + for instance, cancel := range t.cancelFuncs { + cancel() + delete(t.contexts, instance) + delete(t.cancelFuncs, instance) + } +} diff --git a/vendor/github.com/grafana/dskit/ring/ring.go b/vendor/github.com/grafana/dskit/ring/ring.go index 493e6ac1..947f3290 100644 --- a/vendor/github.com/grafana/dskit/ring/ring.go +++ b/vendor/github.com/grafana/dskit/ring/ring.go @@ -6,7 +6,6 @@ import ( "context" "flag" "fmt" - "math" "math/rand" "net/http" @@ -38,7 +37,6 @@ const ( // ReadRing represents the read interface to the ring. type ReadRing interface { - // Get returns n (or more) instances which form the replicas for the given key. // bufDescs, bufHosts and bufZones are slices to be overwritten for the return value // to avoid memory allocation; can be nil, or created with ring.MakeBuffersForGet().
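The tracker code above gates each per-instance goroutine on a release signal. The mechanics are easier to see in isolation; the following is a minimal sketch with simplified types and illustrative names, not part of the vendored diff:

// Sketch of the release-channel pattern used by the result trackers: a send on an
// instance's buffered channel starts its request, closing the channel aborts it.
package example

import (
	"context"
	"errors"
)

var errNotNeeded = errors.New("result from this instance is not needed")

type releaser struct {
	release map[string]chan struct{} // one buffered(1) channel per instance address
}

func newReleaser(addrs []string) *releaser {
	r := &releaser{release: make(map[string]chan struct{}, len(addrs))}
	for _, a := range addrs {
		r.release[a] = make(chan struct{}, 1)
	}
	return r
}

// start releases exactly one waiting worker for addr.
func (r *releaser) start(addr string) { r.release[addr] <- struct{}{} }

// abort tells any waiting (or future) worker for addr that its result is not needed.
func (r *releaser) abort(addr string) { close(r.release[addr]) }

// awaitStart blocks until the worker should run, mirroring the trackers' awaitStart.
func (r *releaser) awaitStart(ctx context.Context, addr string) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case _, ok := <-r.release[addr]:
		if ok {
			return nil
		}
		return errNotNeeded
	}
}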
@@ -153,7 +151,7 @@ type instanceInfo struct { Zone string } -// Ring holds the information about the members of the consistent hash ring. +// Ring is a Service that maintains an in-memory copy of a ring and watches for changes. type Ring struct { services.Service @@ -172,7 +170,7 @@ type Ring struct { oldestRegisteredTimestamp int64 // Maps a token with the information of the instance holding it. This map is immutable and - // cannot be chanced in place because it's shared "as is" between subrings (the only way to + // cannot be changed in place because it's shared "as is" between subrings (the only way to // change it is to create a new one and replace it). ringInstanceByToken map[uint32]instanceInfo @@ -185,7 +183,8 @@ type Ring struct { // Cache of shuffle-sharded subrings per identifier. Invalidated when topology changes. // If set to nil, no caching is done (used by tests, and subrings). - shuffledSubringCache map[subringCacheKey]*Ring + shuffledSubringCache map[subringCacheKey]*Ring + shuffledSubringWithLookbackCache map[subringCacheKey]cachedSubringWithLookback numMembersGaugeVec *prometheus.GaugeVec totalTokensGauge prometheus.Gauge @@ -195,8 +194,15 @@ type Ring struct { } type subringCacheKey struct { - identifier string - shardSize int + identifier string + shardSize int + lookbackPeriod time.Duration +} + +type cachedSubringWithLookback struct { + subring *Ring + validForLookbackWindowsStartingAfter int64 // if the lookback window is from T to S, validForLookbackWindowsStartingAfter is the earliest value of T this cache entry is valid for + validForLookbackWindowsStartingBefore int64 // if the lookback window is from T to S, validForLookbackWindowsStartingBefore is the latest value of T this cache entry is valid for } // New creates a new Ring. Being a service, Ring needs to be started to do anything. @@ -222,25 +228,29 @@ func NewWithStoreClientAndStrategy(cfg Config, name, key string, store kv.Client } r := &Ring{ - key: key, - cfg: cfg, - KVClient: store, - strategy: strategy, - ringDesc: &Desc{}, - shuffledSubringCache: map[subringCacheKey]*Ring{}, + key: key, + cfg: cfg, + KVClient: store, + strategy: strategy, + ringDesc: &Desc{}, + shuffledSubringCache: map[subringCacheKey]*Ring{}, + shuffledSubringWithLookbackCache: map[subringCacheKey]cachedSubringWithLookback{}, numMembersGaugeVec: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ Name: "ring_members", Help: "Number of members in the ring", - ConstLabels: map[string]string{"name": name}}, + ConstLabels: map[string]string{"name": name}, + }, []string{"state"}), totalTokensGauge: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ Name: "ring_tokens_total", Help: "Number of tokens in the ring", - ConstLabels: map[string]string{"name": name}}), + ConstLabels: map[string]string{"name": name}, + }), oldestTimestampGaugeVec: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ Name: "ring_oldest_member_timestamp", Help: "Timestamp of the oldest member in the ring.", - ConstLabels: map[string]string{"name": name}}, + ConstLabels: map[string]string{"name": name}, + }, []string{"state"}), logger: logger, } @@ -297,6 +307,11 @@ func (r *Ring) updateRingState(ringDesc *Desc) { } } + // Ensure the ID of each InstanceDesc is set based on the map of instances. This + // handles the case where some components are running older lifecyclers which do + // not set the instance ID when registering in the ring. 
+ ringDesc.setInstanceIDs() + rc := prevRing.RingCompare(ringDesc) if rc == Equal || rc == EqualButStatesAndTimestamps { // No need to update tokens or zones. Only states and timestamps @@ -325,10 +340,15 @@ func (r *Ring) updateRingState(ringDesc *Desc) { r.ringZones = ringZones r.oldestRegisteredTimestamp = oldestRegisteredTimestamp r.lastTopologyChange = now + + // Invalidate all cached subrings. if r.shuffledSubringCache != nil { - // Invalidate all cached subrings. r.shuffledSubringCache = make(map[subringCacheKey]*Ring) } + if r.shuffledSubringWithLookbackCache != nil { + r.shuffledSubringWithLookbackCache = make(map[subringCacheKey]cachedSubringWithLookback) + } + r.updateRingMetrics(rc) } @@ -341,17 +361,19 @@ func (r *Ring) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts, } var ( - n = r.cfg.ReplicationFactor - instances = bufDescs[:0] - start = searchToken(r.ringTokens, key) - iterations = 0 + n = r.cfg.ReplicationFactor + instances = bufDescs[:0] + start = searchToken(r.ringTokens, key) + iterations = 0 + maxZones = len(r.ringTokensByZone) + maxInstances = len(r.ringDesc.Ingesters) // We use a slice instead of a map because it's faster to search within a // slice than lookup a map for a very low number of items. distinctHosts = bufHosts[:0] distinctZones = bufZones[:0] ) - for i := start; len(distinctHosts) < n && iterations < len(r.ringTokens); i++ { + for i := start; len(distinctHosts) < dsmath.Min(maxInstances, n) && len(distinctZones) < maxZones && iterations < len(r.ringTokens); i++ { iterations++ // Wrap i around in the ring. i %= len(r.ringTokens) @@ -501,32 +523,39 @@ } return ReplicationSet{ - Instances: healthyInstances, - MaxErrors: maxErrors, - MaxUnavailableZones: maxUnavailableZones, + Instances: healthyInstances, + MaxErrors: maxErrors, + MaxUnavailableZones: maxUnavailableZones, + ZoneAwarenessEnabled: r.cfg.ZoneAwarenessEnabled, }, nil } -// countTokens returns the number tokens within the range for each instance. -func (r *Desc) countTokens() map[string]uint32 { +// CountTokens returns the number of tokens within the range for each instance. +// In case of zone-awareness, this method takes into account only tokens of +// the same zone. More precisely, for each instance only the distance between +// its tokens and tokens of the instances from the same zone will be considered. +func (r *Desc) CountTokens() map[string]int64 { var ( - owned = map[string]uint32{} - ringTokens = r.GetTokens() + owned = make(map[string]int64, len(r.Ingesters)) + ringTokensByZone = r.getTokensByZone() ringInstanceByToken = r.getTokensInfo() ) - for i, token := range ringTokens { - var diff uint32 - // Compute how many tokens are within the range. - if i+1 == len(ringTokens) { - diff = (math.MaxUint32 - token) + ringTokens[0] - } else { - diff = ringTokens[i+1] - token - } + for _, ringTokens := range ringTokensByZone { + for i, token := range ringTokens { + var prevToken uint32 + + // Compute how many tokens are within the range. + if i == 0 { + prevToken = ringTokens[len(ringTokens)-1] + } else { + prevToken = ringTokens[i-1] + } - info := ringInstanceByToken[token] - owned[info.InstanceID] = owned[info.InstanceID] + diff + diff := tokenDistance(prevToken, token) + info := ringInstanceByToken[token] + owned[info.InstanceID] = owned[info.InstanceID] + diff + } } // Set to 0 the number of owned tokens by instances which don't have tokens yet.
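The CountTokens hunk above folds each zone's sorted token list into per-instance ownership using tokenDistance. Below is a minimal standalone sketch of that computation; it assumes tokenDistance returns the wrap-around (clockwise) distance on the uint32 token space, since the real helper lives elsewhere in the package and is not shown in this diff:

// Sketch: per-owner token ownership within one zone's sorted token list.
package example

const ringSize = int64(1) << 32

// tokenDistance is an assumed re-implementation of the package helper: the
// clockwise distance from prev to cur on the uint32 ring.
func tokenDistance(prev, cur uint32) int64 {
	d := int64(cur) - int64(prev)
	if d <= 0 {
		d += ringSize // wrap around the ring
	}
	return d
}

// ownedTokens attributes to each owner the distance between its tokens and
// their predecessors, as CountTokens does zone by zone.
func ownedTokens(sortedTokens []uint32, ownerOf map[uint32]string) map[string]int64 {
	owned := make(map[string]int64)
	for i, token := range sortedTokens {
		prev := sortedTokens[(i+len(sortedTokens)-1)%len(sortedTokens)] // last token precedes the first
		owned[ownerOf[token]] += tokenDistance(prev, token)
	}
	return owned
}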
@@ -623,14 +652,25 @@ func (r *Ring) ShuffleShard(identifier string, size int) ReadRing { // The returned subring may be unbalanced with regard to zones and should never be used for write // operations (read only). // -// This function doesn't support caching. +// This function supports caching, but the cache will only be effective if successive calls for the +// same identifier are for increasing values of (now-lookbackPeriod). func (r *Ring) ShuffleShardWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time) ReadRing { // Nothing to do if the shard size is not smaller then the actual ring. if size <= 0 || r.InstancesCount() <= size { return r } - return r.shuffleShard(identifier, size, lookbackPeriod, now) + if cached := r.getCachedShuffledSubringWithLookback(identifier, size, lookbackPeriod, now); cached != nil { + return cached + } + + result := r.shuffleShard(identifier, size, lookbackPeriod, now) + + if result != r { + r.setCachedShuffledSubringWithLookback(identifier, size, lookbackPeriod, now, result) + } + + return result } func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Duration, now time.Time) *Ring { @@ -700,7 +740,7 @@ func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Dur panic(ErrInconsistentTokensInfo) } - // Ensure we select an unique instance. + // Ensure we select a unique instance. if _, ok := shard[info.InstanceID]; ok { continue } @@ -731,12 +771,13 @@ func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Dur // Build a read-only ring for the shard. shardDesc := &Desc{Ingesters: shard} shardTokensByZone := shardDesc.getTokensByZone() + shardTokens := mergeTokenGroups(shardTokensByZone) return &Ring{ cfg: r.cfg, strategy: r.strategy, ringDesc: shardDesc, - ringTokens: shardDesc.GetTokens(), + ringTokens: shardTokens, ringTokensByZone: shardTokensByZone, ringZones: getZones(shardTokensByZone), @@ -752,6 +793,56 @@ func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Dur } } +// mergeTokenGroups returns a sorted list of all tokens in each entry in groupsByName. +// Each element of groupsByName is assumed to already be sorted. +func mergeTokenGroups(groupsByName map[string][]uint32) []uint32 { + tokenCount := 0 + groupsByIndex := make([][]uint32, 0, len(groupsByName)) + nextIndex := make([]int, 0, len(groupsByName)) + + for _, group := range groupsByName { + // If there's only one group, there's nothing to merge. + if len(groupsByName) == 1 { + return group + } + + tokenCount += len(group) + groupsByIndex = append(groupsByIndex, group) + nextIndex = append(nextIndex, 0) + } + + merged := make([]uint32, 0, tokenCount) + + for i := 0; i < tokenCount; i++ { + haveSeenGroupWithRemainingToken := false + lowestToken := uint32(0) + lowestTokenGroupIndex := 0 + + for groupIndex, group := range groupsByIndex { + nextIndexInGroup := nextIndex[groupIndex] + + if nextIndexInGroup >= len(group) { + continue + } + + if group[nextIndexInGroup] < lowestToken || !haveSeenGroupWithRemainingToken { + lowestToken = group[nextIndexInGroup] + lowestTokenGroupIndex = groupIndex + haveSeenGroupWithRemainingToken = true + } + } + + if !haveSeenGroupWithRemainingToken { + return merged + } + + merged = append(merged, lowestToken) + nextIndex[lowestTokenGroupIndex]++ + } + + return merged +} + // GetInstanceState returns the current state of an instance or an error if the // instance does not exist in the ring. 
func (r *Ring) GetInstanceState(instanceID string) (InstanceState, error) { @@ -826,6 +917,91 @@ func (r *Ring) setCachedShuffledSubring(identifier string, size int, subring *Ri } } +func (r *Ring) getCachedShuffledSubringWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time) *Ring { + if r.cfg.SubringCacheDisabled { + return nil + } + + r.mtx.RLock() + defer r.mtx.RUnlock() + + cached, ok := r.shuffledSubringWithLookbackCache[subringCacheKey{identifier: identifier, shardSize: size, lookbackPeriod: lookbackPeriod}] + if !ok { + return nil + } + + lookbackWindowStart := now.Add(-lookbackPeriod).Unix() + if lookbackWindowStart < cached.validForLookbackWindowsStartingAfter || lookbackWindowStart > cached.validForLookbackWindowsStartingBefore { + // The cached subring is not valid for the lookback window that has been requested. + return nil + } + + cachedSubring := cached.subring + + // No need to update the cached subring if it is the original ring itself. + if r == cachedSubring { + return cachedSubring + } + + cachedSubring.mtx.Lock() + defer cachedSubring.mtx.Unlock() + + // Update instance states and timestamps. We know that the topology is the same, + // so zones and tokens are equal. + for name, cachedIng := range cachedSubring.ringDesc.Ingesters { + ing := r.ringDesc.Ingesters[name] + cachedIng.State = ing.State + cachedIng.Timestamp = ing.Timestamp + cachedSubring.ringDesc.Ingesters[name] = cachedIng + } + + return cachedSubring +} + +func (r *Ring) setCachedShuffledSubringWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time, subring *Ring) { + if subring == nil || r.cfg.SubringCacheDisabled { + return + } + + lookbackWindowStart := now.Add(-lookbackPeriod).Unix() + validForLookbackWindowsStartingBefore := int64(math.MaxInt64) + + for _, instance := range subring.ringDesc.Ingesters { + registeredDuringLookbackWindow := instance.RegisteredTimestamp >= lookbackWindowStart + + if registeredDuringLookbackWindow && instance.RegisteredTimestamp < validForLookbackWindowsStartingBefore { + validForLookbackWindowsStartingBefore = instance.RegisteredTimestamp + } + } + + r.mtx.Lock() + defer r.mtx.Unlock() + + // Only cache if *this* ring hasn't changed since computing result + // (which can happen between releasing the read lock and getting read-write lock). + // Note that shuffledSubringWithLookbackCache can be only nil when set by test. + if r.shuffledSubringWithLookbackCache == nil { + return + } + + if !r.lastTopologyChange.Equal(subring.lastTopologyChange) { + return + } + + // Only update cache if subring's lookback window starts later than the previously cached subring for this identifier, + // if there is one. This prevents cache thrashing due to different calls competing if their lookback windows start + // before and after the time of an instance registering. 
+ key := subringCacheKey{identifier: identifier, shardSize: size, lookbackPeriod: lookbackPeriod} + + if existingEntry, haveCached := r.shuffledSubringWithLookbackCache[key]; !haveCached || existingEntry.validForLookbackWindowsStartingAfter < lookbackWindowStart { + r.shuffledSubringWithLookbackCache[key] = cachedSubringWithLookback{ + subring: subring, + validForLookbackWindowsStartingAfter: lookbackWindowStart, + validForLookbackWindowsStartingBefore: validForLookbackWindowsStartingBefore, + } + } +} + func (r *Ring) CleanupShuffleShardCache(identifier string) { if r.cfg.SubringCacheDisabled { return @@ -839,13 +1015,19 @@ func (r *Ring) CleanupShuffleShardCache(identifier string) { delete(r.shuffledSubringCache, k) } } + + for k := range r.shuffledSubringWithLookbackCache { + if k.identifier == identifier { + delete(r.shuffledSubringWithLookbackCache, k) + } + } } func (r *Ring) casRing(ctx context.Context, f func(in interface{}) (out interface{}, retry bool, err error)) error { return r.KVClient.CAS(ctx, r.key, f) } -func (r *Ring) getRing(ctx context.Context) (*Desc, error) { +func (r *Ring) getRing(_ context.Context) (*Desc, error) { r.mtx.RLock() defer r.mtx.RUnlock() diff --git a/vendor/github.com/grafana/dskit/ring/ring.pb.go b/vendor/github.com/grafana/dskit/ring/ring.pb.go index 453ac4c2..fb9d76c4 100644 --- a/vendor/github.com/grafana/dskit/ring/ring.pb.go +++ b/vendor/github.com/grafana/dskit/ring/ring.pb.go @@ -59,6 +59,7 @@ func (InstanceState) EnumDescriptor() ([]byte, []int) { return fileDescriptor_26381ed67e202a6e, []int{0} } +// Desc is the top-level type used to model a ring, containing information for individual instances. type Desc struct { Ingesters map[string]InstanceDesc `protobuf:"bytes,1,rep,name=ingesters,proto3" json:"ingesters" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -102,6 +103,7 @@ func (m *Desc) GetIngesters() map[string]InstanceDesc { return nil } +// InstanceDesc is the top-level type used to model per-instance information in a ring. type InstanceDesc struct { Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` // Unix timestamp (with seconds precision) of the last heartbeat sent @@ -124,6 +126,8 @@ type InstanceDesc struct { // was already registered before "now". If unknown (0), it should be left as is, and the // code will properly deal with that. RegisteredTimestamp int64 `protobuf:"varint,8,opt,name=registered_timestamp,json=registeredTimestamp,proto3" json:"registered_timestamp,omitempty"` + // ID of the instance. This value is the same as the key in the ingesters map in Desc. 
+ Id string `protobuf:"bytes,9,opt,name=id,proto3" json:"id,omitempty"` } func (m *InstanceDesc) Reset() { *m = InstanceDesc{} } @@ -200,6 +204,13 @@ func (m *InstanceDesc) GetRegisteredTimestamp() int64 { return 0 } +func (m *InstanceDesc) GetId() string { + if m != nil { + return m.Id + } + return "" +} + func init() { proto.RegisterEnum("ring.InstanceState", InstanceState_name, InstanceState_value) proto.RegisterType((*Desc)(nil), "ring.Desc") @@ -210,34 +221,35 @@ func init() { func init() { proto.RegisterFile("ring.proto", fileDescriptor_26381ed67e202a6e) } var fileDescriptor_26381ed67e202a6e = []byte{ - // 425 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xc1, 0x6e, 0xd3, 0x40, - 0x18, 0x84, 0xf7, 0x8f, 0x37, 0xae, 0xf3, 0x87, 0x56, 0xd6, 0x16, 0x21, 0x53, 0xa1, 0xc5, 0xea, - 0xc9, 0x20, 0xe1, 0x8a, 0xc0, 0x01, 0x21, 0x71, 0x68, 0xa9, 0x41, 0xb6, 0xa2, 0x50, 0x99, 0xa8, - 0x57, 0xe4, 0x24, 0x8b, 0xb1, 0x4a, 0xec, 0xca, 0xde, 0x20, 0x95, 0x13, 0x8f, 0xc0, 0x0b, 0x70, - 0xe7, 0x51, 0x7a, 0xcc, 0x09, 0xf5, 0x84, 0x88, 0x73, 0xe1, 0xd8, 0x47, 0x40, 0xbb, 0x6e, 0x65, - 0x72, 0x9b, 0xcf, 0x33, 0xff, 0x8c, 0x2d, 0x19, 0xb1, 0xcc, 0xf2, 0xd4, 0x3f, 0x2f, 0x0b, 0x59, - 0x30, 0xaa, 0xf4, 0xde, 0x93, 0x34, 0x93, 0x9f, 0x16, 0x13, 0x7f, 0x5a, 0xcc, 0x0f, 0xd2, 0x22, - 0x2d, 0x0e, 0xb4, 0x39, 0x59, 0x7c, 0xd4, 0xa4, 0x41, 0xab, 0xe6, 0x68, 0xff, 0x07, 0x20, 0x3d, - 0x16, 0xd5, 0x94, 0xbd, 0xc2, 0x5e, 0x96, 0xa7, 0xa2, 0x92, 0xa2, 0xac, 0x1c, 0x70, 0x0d, 0xaf, - 0x3f, 0xb8, 0xef, 0xeb, 0x76, 0x65, 0xfb, 0xe1, 0xad, 0x17, 0xe4, 0xb2, 0xbc, 0x38, 0xa2, 0x97, - 0xbf, 0x1f, 0x92, 0xb8, 0xbd, 0xd8, 0x3b, 0xc1, 0x9d, 0xcd, 0x08, 0xb3, 0xd1, 0x38, 0x13, 0x17, - 0x0e, 0xb8, 0xe0, 0xf5, 0x62, 0x25, 0x99, 0x87, 0xdd, 0x2f, 0xc9, 0xe7, 0x85, 0x70, 0x3a, 0x2e, - 0x78, 0xfd, 0x01, 0x6b, 0xea, 0xc3, 0xbc, 0x92, 0x49, 0x3e, 0x15, 0x6a, 0x26, 0x6e, 0x02, 0x2f, - 0x3b, 0x2f, 0x20, 0xa2, 0x56, 0xc7, 0x36, 0xf6, 0x7f, 0x01, 0xde, 0xf9, 0x3f, 0xc1, 0x18, 0xd2, - 0x64, 0x36, 0x2b, 0x6f, 0x7a, 0xb5, 0x66, 0x0f, 0xb0, 0x27, 0xb3, 0xb9, 0xa8, 0x64, 0x32, 0x3f, - 0xd7, 0xe5, 0x46, 0xdc, 0x3e, 0x60, 0x8f, 0xb0, 0x5b, 0xc9, 0x44, 0x0a, 0xc7, 0x70, 0xc1, 0xdb, - 0x19, 0xec, 0x6e, 0xce, 0xbe, 0x57, 0x56, 0xdc, 0x24, 0xd8, 0x3d, 0x34, 0x65, 0x71, 0x26, 0xf2, - 0xca, 0x31, 0x5d, 0xc3, 0xdb, 0x8e, 0x6f, 0x48, 0x8d, 0x7e, 0x2d, 0x72, 0xe1, 0x6c, 0x35, 0xa3, - 0x4a, 0xb3, 0xa7, 0x78, 0xb7, 0x14, 0x69, 0xa6, 0xbe, 0x58, 0xcc, 0x3e, 0xb4, 0xfb, 0x96, 0xde, - 0xdf, 0x6d, 0xbd, 0xf1, 0xad, 0x15, 0x51, 0x8b, 0xda, 0xdd, 0x88, 0x5a, 0x5d, 0xdb, 0x7c, 0x3c, - 0xc4, 0xed, 0x8d, 0x57, 0x60, 0x88, 0xe6, 0xe1, 0xeb, 0x71, 0x78, 0x1a, 0xd8, 0x84, 0xf5, 0x71, - 0x6b, 0x18, 0x1c, 0x9e, 0x86, 0xa3, 0xb7, 0x36, 0x28, 0x38, 0x09, 0x46, 0xc7, 0x0a, 0x3a, 0x0a, - 0xa2, 0x77, 0xe1, 0x48, 0x81, 0xc1, 0x2c, 0xa4, 0xc3, 0xe0, 0xcd, 0xd8, 0xa6, 0x47, 0xcf, 0x97, - 0x2b, 0x4e, 0xae, 0x56, 0x9c, 0x5c, 0xaf, 0x38, 0x7c, 0xab, 0x39, 0xfc, 0xac, 0x39, 0x5c, 0xd6, - 0x1c, 0x96, 0x35, 0x87, 0x3f, 0x35, 0x87, 0xbf, 0x35, 0x27, 0xd7, 0x35, 0x87, 0xef, 0x6b, 0x4e, - 0x96, 0x6b, 0x4e, 0xae, 0xd6, 0x9c, 0x4c, 0x4c, 0xfd, 0x0f, 0x3c, 0xfb, 0x17, 0x00, 0x00, 0xff, - 0xff, 0x97, 0x76, 0x41, 0xaf, 0x46, 0x02, 0x00, 0x00, + // 435 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x1c, 0xc5, 0xe7, 0x9f, 0x4c, 0xb3, 0xe9, 0xbf, 0x6e, 0x09, 0xb3, 0x22, 0x71, 0x91, 0x31, 0xec, + 0x29, 0x0a, 0x76, 0xb1, 0x7a, 0x10, 0xc1, 
0xc3, 0xae, 0x1b, 0x25, 0xa5, 0xd4, 0x25, 0x96, 0xbd, + 0x4a, 0xda, 0x8c, 0x31, 0xac, 0x4d, 0x96, 0x64, 0x2a, 0xac, 0x27, 0x3f, 0x82, 0x5f, 0xc0, 0xbb, + 0x1f, 0x65, 0x8f, 0x3d, 0xee, 0x49, 0x6c, 0x0a, 0xe2, 0x71, 0x3f, 0x82, 0xcc, 0xa4, 0x5a, 0x7b, + 0x7b, 0xbf, 0xbc, 0x37, 0xef, 0x4d, 0x60, 0x10, 0xcb, 0x2c, 0x4f, 0x7b, 0x17, 0x65, 0x21, 0x0b, + 0x46, 0x95, 0xde, 0x7f, 0x94, 0x66, 0xf2, 0xc3, 0x7c, 0xd2, 0x9b, 0x16, 0xb3, 0xc3, 0xb4, 0x48, + 0x8b, 0x43, 0x6d, 0x4e, 0xe6, 0xef, 0x35, 0x69, 0xd0, 0xaa, 0x39, 0x74, 0xf0, 0x0d, 0x90, 0x9e, + 0x88, 0x6a, 0xca, 0x5e, 0x60, 0x3b, 0xcb, 0x53, 0x51, 0x49, 0x51, 0x56, 0x2e, 0x78, 0xa6, 0xdf, + 0xe9, 0xdf, 0xed, 0xe9, 0x76, 0x65, 0xf7, 0xc2, 0xbf, 0x5e, 0x90, 0xcb, 0xf2, 0xf2, 0x98, 0x5e, + 0xfd, 0xb8, 0x4f, 0xa2, 0xcd, 0x89, 0xfd, 0x53, 0xec, 0x6e, 0x47, 0x98, 0x83, 0xe6, 0xb9, 0xb8, + 0x74, 0xc1, 0x03, 0xbf, 0x1d, 0x29, 0xc9, 0x7c, 0x6c, 0x7d, 0x8a, 0x3f, 0xce, 0x85, 0x6b, 0x78, + 0xe0, 0x77, 0xfa, 0xac, 0xa9, 0x0f, 0xf3, 0x4a, 0xc6, 0xf9, 0x54, 0xa8, 0x99, 0xa8, 0x09, 0x3c, + 0x37, 0x9e, 0xc1, 0x80, 0xda, 0x86, 0x63, 0x1e, 0xfc, 0x02, 0xbc, 0xf5, 0x7f, 0x82, 0x31, 0xa4, + 0x71, 0x92, 0x94, 0xeb, 0x5e, 0xad, 0xd9, 0x3d, 0x6c, 0xcb, 0x6c, 0x26, 0x2a, 0x19, 0xcf, 0x2e, + 0x74, 0xb9, 0x19, 0x6d, 0x3e, 0xb0, 0x07, 0xd8, 0xaa, 0x64, 0x2c, 0x85, 0x6b, 0x7a, 0xe0, 0x77, + 0xfb, 0x7b, 0xdb, 0xb3, 0x6f, 0x95, 0x15, 0x35, 0x09, 0x76, 0x07, 0x2d, 0x59, 0x9c, 0x8b, 0xbc, + 0x72, 0x2d, 0xcf, 0xf4, 0x77, 0xa3, 0x35, 0xa9, 0xd1, 0xcf, 0x45, 0x2e, 0xdc, 0x9d, 0x66, 0x54, + 0x69, 0xf6, 0x18, 0x6f, 0x97, 0x22, 0xcd, 0xd4, 0x1f, 0x8b, 0xe4, 0xdd, 0x66, 0xdf, 0xd6, 0xfb, + 0x7b, 0x1b, 0x6f, 0xfc, 0xef, 0x26, 0x5d, 0x34, 0xb2, 0xc4, 0x6d, 0xeb, 0x12, 0x23, 0x4b, 0x06, + 0xd4, 0xa6, 0x4e, 0x6b, 0x40, 0xed, 0x96, 0x63, 0x3d, 0x1c, 0xe2, 0xee, 0xd6, 0x95, 0x18, 0xa2, + 0x75, 0xf4, 0x72, 0x1c, 0x9e, 0x05, 0x0e, 0x61, 0x1d, 0xdc, 0x19, 0x06, 0x47, 0x67, 0xe1, 0xe8, + 0xb5, 0x03, 0x0a, 0x4e, 0x83, 0xd1, 0x89, 0x02, 0x43, 0xc1, 0xe0, 0x4d, 0x38, 0x52, 0x60, 0x32, + 0x1b, 0xe9, 0x30, 0x78, 0x35, 0x76, 0xe8, 0xf1, 0xd3, 0xc5, 0x92, 0x93, 0xeb, 0x25, 0x27, 0x37, + 0x4b, 0x0e, 0x5f, 0x6a, 0x0e, 0xdf, 0x6b, 0x0e, 0x57, 0x35, 0x87, 0x45, 0xcd, 0xe1, 0x67, 0xcd, + 0xe1, 0x77, 0xcd, 0xc9, 0x4d, 0xcd, 0xe1, 0xeb, 0x8a, 0x93, 0xc5, 0x8a, 0x93, 0xeb, 0x15, 0x27, + 0x13, 0x4b, 0xbf, 0x89, 0x27, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xad, 0x7a, 0xa4, 0x89, 0x56, + 0x02, 0x00, 0x00, } func (x InstanceState) String() string { @@ -320,6 +332,9 @@ func (this *InstanceDesc) Equal(that interface{}) bool { if this.RegisteredTimestamp != that1.RegisteredTimestamp { return false } + if this.Id != that1.Id { + return false + } return true } func (this *Desc) GoString() string { @@ -348,7 +363,7 @@ func (this *InstanceDesc) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 10) + s := make([]string, 0, 11) s = append(s, "&ring.InstanceDesc{") s = append(s, "Addr: "+fmt.Sprintf("%#v", this.Addr)+",\n") s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") @@ -356,6 +371,7 @@ func (this *InstanceDesc) GoString() string { s = append(s, "Tokens: "+fmt.Sprintf("%#v", this.Tokens)+",\n") s = append(s, "Zone: "+fmt.Sprintf("%#v", this.Zone)+",\n") s = append(s, "RegisteredTimestamp: "+fmt.Sprintf("%#v", this.RegisteredTimestamp)+",\n") + s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -434,6 +450,13 @@ func (m *InstanceDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if 
len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintRing(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0x4a + } if m.RegisteredTimestamp != 0 { i = encodeVarintRing(dAtA, i, uint64(m.RegisteredTimestamp)) i-- @@ -543,6 +566,10 @@ func (m *InstanceDesc) Size() (n int) { if m.RegisteredTimestamp != 0 { n += 1 + sovRing(uint64(m.RegisteredTimestamp)) } + l = len(m.Id) + if l > 0 { + n += 1 + l + sovRing(uint64(l)) + } return n } @@ -583,6 +610,7 @@ func (this *InstanceDesc) String() string { `Tokens:` + fmt.Sprintf("%v", this.Tokens) + `,`, `Zone:` + fmt.Sprintf("%v", this.Zone) + `,`, `RegisteredTimestamp:` + fmt.Sprintf("%v", this.RegisteredTimestamp) + `,`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, `}`, }, "") return s @@ -1003,6 +1031,38 @@ func (m *InstanceDesc) Unmarshal(dAtA []byte) error { break } } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRing + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRing + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRing(dAtA[iNdEx:]) diff --git a/vendor/github.com/grafana/dskit/ring/ring.proto b/vendor/github.com/grafana/dskit/ring/ring.proto index 8f464dc2..08bf8850 100644 --- a/vendor/github.com/grafana/dskit/ring/ring.proto +++ b/vendor/github.com/grafana/dskit/ring/ring.proto @@ -7,11 +7,13 @@ import "github.com/gogo/protobuf/gogoproto/gogo.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; +// Desc is the top-level type used to model a ring, containing information for individual instances. message Desc { map<string, InstanceDesc> ingesters = 1 [(gogoproto.nullable) = false]; reserved 2; } +// InstanceDesc is the top-level type used to model per-instance information in a ring. message InstanceDesc { reserved 4, 5; // old, deprecated fields @@ -39,6 +41,9 @@ message InstanceDesc { // was already registered before "now". If unknown (0), it should be left as is, and the // code will properly deal with that. int64 registered_timestamp = 8; + + // ID of the instance. This value is the same as the key in the ingesters map in Desc.
+ string id = 9; } enum InstanceState { diff --git a/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go b/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go new file mode 100644 index 00000000..23638250 --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go @@ -0,0 +1,358 @@ +package ring + +import ( + "container/heap" + "fmt" + "math" + "regexp" + "sort" + "strconv" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + + "golang.org/x/exp/slices" +) + +const ( + totalTokensCount = math.MaxUint32 + 1 + optimalTokensPerInstance = 512 + maxZonesCount = 8 +) + +var ( + instanceIDRegex = regexp.MustCompile(`^(.*)-(\d+)$`) + errorBadInstanceIDFormat = func(instanceID string) error { + return fmt.Errorf("unable to extract instance id from %q", instanceID) + } + errorNoPreviousInstance = fmt.Errorf("impossible to find the instance preceding the target instance, because it is the first instance") + + errorMissingPreviousInstance = func(requiredInstanceID string) error { + return fmt.Errorf("the instance %q has not been registered to the ring or has no tokens yet", requiredInstanceID) + } + errorZoneCountOutOfBound = func(zonesCount int) error { + return fmt.Errorf("number of zones %d is not correct: it must be greater than 0 and less than or equal to %d", zonesCount, maxZonesCount) + } + errorZoneNotValid = func(zone string) error { + return fmt.Errorf("zone %s is not valid", zone) + } + errorMultipleOfZonesCount = func(optimalTokenOwnership uint32, token ringToken) error { + return fmt.Errorf("calculation of a new token between %d and %d with optimal token ownership %d was impossible: optimal token ownership must be a positive multiple of maximal allowed number of zones %d", token.prevToken, token.token, optimalTokenOwnership, maxZonesCount) + } + errorLowerAndUpperBoundModulo = func(optimalTokenOwnership uint32, token ringToken) error { + return fmt.Errorf("calculation of a new token between %d and %d with optimal token ownership %d was impossible: lower and upper bounds must be congruent modulo maximal allowed number of zones %d", token.prevToken, token.token, optimalTokenOwnership, maxZonesCount) + } + errorDistanceBetweenTokensNotBigEnough = func(optimalTokenOwnership int, ownership int64, token ringToken) error { + return fmt.Errorf("calculation of a new token between %d and %d with optimal token ownership %d was impossible: distance between lower and upper bound %d is not big enough", token.prevToken, token.token, optimalTokenOwnership, ownership) + } +) + +type SpreadMinimizingTokenGenerator struct { + instanceID int + instance string + zoneID int + spreadMinimizingZones []string + canJoinEnabled bool + logger log.Logger +} + +func NewSpreadMinimizingTokenGenerator(instance, zone string, spreadMinimizingZones []string, canJoinEnabled bool, logger log.Logger) (*SpreadMinimizingTokenGenerator, error) { + if len(spreadMinimizingZones) <= 0 || len(spreadMinimizingZones) > maxZonesCount { + return nil, errorZoneCountOutOfBound(len(spreadMinimizingZones)) + } + sortedZones := make([]string, len(spreadMinimizingZones)) + copy(sortedZones, spreadMinimizingZones) + if !slices.IsSorted(sortedZones) { + sort.Strings(sortedZones) + } + instanceID, err := parseInstanceID(instance) + if err != nil { + return nil, err + } + zoneID, err := findZoneID(zone, sortedZones) + if err != nil { + return nil, err + } + + tokenGenerator := &SpreadMinimizingTokenGenerator{ + instanceID: instanceID, +
instance: instance, + zoneID: zoneID, + spreadMinimizingZones: sortedZones, + canJoinEnabled: canJoinEnabled, + logger: logger, + } + return tokenGenerator, nil +} + +func parseInstanceID(instanceID string) (int, error) { + parts := instanceIDRegex.FindStringSubmatch(instanceID) + if len(parts) != 3 { + return 0, errorBadInstanceIDFormat(instanceID) + } + return strconv.Atoi(parts[2]) +} + +// previousInstance determines the string id of the instance preceding the given instance string id. +// If it is impossible to parse the given instanceID, or it is impossible to determine its predecessor +// because the passed instanceID has a bad format, or has no predecessor, an error is returned. +// For example, my-instance-1 is preceded by instance my-instance-0, but my-instance-0 has no +// predecessor because its index is 0. +func previousInstance(instanceID string) (string, error) { + parts := instanceIDRegex.FindStringSubmatch(instanceID) + if len(parts) != 3 { + return "", errorBadInstanceIDFormat(instanceID) + } + id, err := strconv.Atoi(parts[2]) + if err != nil { + return "", err + } + if id == 0 { + return "", errorNoPreviousInstance + } + return fmt.Sprintf("%s-%d", parts[1], id-1), nil +} + +// findZoneID gets a zone name and a slice of sorted zones, +// and returns the index of the zone in the slice. +func findZoneID(zone string, sortedZones []string) (int, error) { + index := slices.Index(sortedZones, zone) + if index < 0 { + return 0, errorZoneNotValid(zone) + } + return index, nil +} + +// generateFirstInstanceTokens calculates a set of tokens that should be assigned to the first instance (with id 0) +// of the zone of the underlying instance. +func (t *SpreadMinimizingTokenGenerator) generateFirstInstanceTokens() Tokens { + // In this approach all the tokens from the same zone are equal to each other modulo maxZonesCount. + // Therefore, tokenDistance is calculated as a multiple of maxZonesCount, so that we ensure that + // the following for loop calculates the actual tokens following the approach's requirement. + tokenDistance := (totalTokensCount / optimalTokensPerInstance / maxZonesCount) * maxZonesCount + tokens := make(Tokens, 0, optimalTokensPerInstance) + for i := 0; i < optimalTokensPerInstance; i++ { + token := uint32(i*tokenDistance) + uint32(t.zoneID) + tokens = append(tokens, token) + } + return tokens +} + +// calculateNewToken determines where in the range represented by the given ringToken should a new token be placed +// in order to satisfy the constraint represented by the optimalTokenOwnership. This method assumes that: +// - ringToken.token % maxZonesCount == ringToken.prevToken % maxZonesCount +// - optimalTokenOwnership % maxZonesCount == 0, +// where maxZonesCount is the maximal allowed number of zones. The caller of this function must ensure that these assumptions hold.
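Before the implementation below, a small standalone sketch may make the first-instance layout concrete: with the constants from this file, tokens of zone z sit at i*8388608 + z, so zones occupy disjoint residue classes modulo maxZonesCount. This is an illustration, not part of the vendored code:

// Sketch: reproduce the generateFirstInstanceTokens spacing and print a few tokens.
package main

import "fmt"

const (
	totalTokensCount         = int64(1) << 32
	optimalTokensPerInstance = 512
	maxZonesCount            = 8
)

func firstInstanceTokens(zoneID int) []uint32 {
	// Spacing rounded down to a multiple of maxZonesCount keeps every token of a
	// zone in its residue class (token % maxZonesCount == zoneID).
	tokenDistance := (totalTokensCount / optimalTokensPerInstance / maxZonesCount) * maxZonesCount
	tokens := make([]uint32, 0, optimalTokensPerInstance)
	for i := int64(0); i < optimalTokensPerInstance; i++ {
		tokens = append(tokens, uint32(i*tokenDistance)+uint32(zoneID))
	}
	return tokens
}

func main() {
	for z := 0; z < 3; z++ {
		fmt.Println("zone", z, "first tokens:", firstInstanceTokens(z)[:3])
		// zone 0: [0 8388608 16777216]
		// zone 1: [1 8388609 16777217]
		// zone 2: [2 8388610 16777218]
	}
}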
+func (t *SpreadMinimizingTokenGenerator) calculateNewToken(token ringToken, optimalTokenOwnership uint32) (uint32, error) { + if optimalTokenOwnership < maxZonesCount || optimalTokenOwnership%maxZonesCount != 0 { + return 0, errorMultipleOfZonesCount(optimalTokenOwnership, token) + } + if token.prevToken%maxZonesCount != token.token%maxZonesCount { + return 0, errorLowerAndUpperBoundModulo(optimalTokenOwnership, token) + } + ownership := tokenDistance(token.prevToken, token.token) + if ownership <= int64(optimalTokenOwnership) { + return 0, errorDistanceBetweenTokensNotBigEnough(int(optimalTokenOwnership), ownership, token) + } + // In the present approach tokens of successive zones are immediate successors of the tokens in + // the previous zone. This means that once a token of the "leading" zone, i.e., the zone with + // id 0 is determined, we must have enough space to accommodate the corresponding tokens in the + // remaining (maxZonesCount-1) zones. Hence, the highest token of the leading zone must be a + // multiple of maxZonesCount, that guarantees that there are remaining (maxZonesCount-1) available + // tokens in the token space. + maxTokenValue := uint32(((totalTokensCount / maxZonesCount) - 1) * maxZonesCount) + offset := maxTokenValue - token.prevToken + if offset < optimalTokenOwnership { + newToken := optimalTokenOwnership - offset + return newToken, nil + } + return token.prevToken + optimalTokenOwnership, nil +} + +// optimalTokenOwnership calculates the optimal ownership of the remaining remainingTokensCount tokens of an instance +// having the given current instance ownership currInstanceOwnership and the given optimal instance ownership +// optimalInstanceOwnership. The resulting token ownership must be a multiple of the number of zones. +func (t *SpreadMinimizingTokenGenerator) optimalTokenOwnership(optimalInstanceOwnership, currInstanceOwnership float64, remainingTokensCount uint32) uint32 { + optimalTokenOwnership := uint32(optimalInstanceOwnership-currInstanceOwnership) / remainingTokensCount + return (optimalTokenOwnership / maxZonesCount) * maxZonesCount +} + +// GenerateTokens returns at most requestedTokensCount unique tokens, none of which clashes with the given +// allTakenTokens, representing the set of all tokens currently present in the ring. Returned tokens are sorted. +// The optimal number of tokens (optimalTokensPerInstance), i.e., 512, reserved for the underlying instance is +// generated by generateAllTokens. GenerateTokens selects the first requestedTokensCount tokens from the reserved +// tokens set, that are not already present in allTakenTokens. +// The number of returned tokens might be lower than the requested number of tokens in the following cases: +// - if requestedTokensCount is higher than 512 (optimalTokensPerInstance), or +// - if among the 512 (optimalTokensPerInstance) reserved tokens there are fewer than requestedTokensCount +// tokens not already present in allTakenTokens. +func (t *SpreadMinimizingTokenGenerator) GenerateTokens(requestedTokensCount int, allTakenTokens []uint32) Tokens { + used := make(map[uint32]bool, len(allTakenTokens)) + for _, v := range allTakenTokens { + used[v] = true + } + + allTokens := t.generateAllTokens() + uniqueTokens := make(Tokens, 0, requestedTokensCount) + + // allTokens is a sorted slice of tokens for the instance t.instance in zone t.spreadMinimizingZones[t.zoneID]. + // We filter out tokens from allTakenTokens, if any, and return at most requestedTokensCount tokens.
+// GenerateTokens returns at most requestedTokensCount unique tokens, none of which clashes with the given
+// allTakenTokens, representing the set of all tokens currently present in the ring. Returned tokens are sorted.
+// The optimal number of tokens (optimalTokensPerInstance), i.e., 512, reserved for the underlying instance are
+// generated by generateAllTokens. GenerateTokens selects the first requestedTokensCount tokens from the reserved
+// tokens set that are not already present in allTakenTokens.
+// The number of returned tokens might be lower than the requested number of tokens in the following cases:
+// - if requestedTokensCount is higher than 512 (optimalTokensPerInstance), or
+// - if among the 512 (optimalTokensPerInstance) reserved tokens there are fewer than requestedTokensCount
+//   tokens not already present in allTakenTokens.
+func (t *SpreadMinimizingTokenGenerator) GenerateTokens(requestedTokensCount int, allTakenTokens []uint32) Tokens {
+	used := make(map[uint32]bool, len(allTakenTokens))
+	for _, v := range allTakenTokens {
+		used[v] = true
+	}
+
+	allTokens := t.generateAllTokens()
+	uniqueTokens := make(Tokens, 0, requestedTokensCount)
+
+	// allTokens is a sorted slice of tokens reserved for the instance t.instanceID in its zone.
+	// We filter out tokens from allTakenTokens, if any, and return at most requestedTokensCount tokens.
+	for i := 0; i < len(allTokens) && len(uniqueTokens) < requestedTokensCount; i++ {
+		token := allTokens[i]
+		if used[token] {
+			continue
+		}
+		uniqueTokens = append(uniqueTokens, token)
+	}
+	return uniqueTokens
+}
+
+// generateAllTokens generates the optimal number of tokens (optimalTokensPerInstance), i.e., 512,
+// for the underlying instance (with id t.instanceID). Generated tokens are sorted, and they are
+// distributed in such a way that the registered ownership of the instance t.instanceID, when it is
+// placed in a ring that already contains instances with all the ids lower than t.instanceID,
+// is optimal.
+// Calls to this method will always return the same set of tokens.
+func (t *SpreadMinimizingTokenGenerator) generateAllTokens() Tokens {
+	tokensByInstanceID := t.generateTokensByInstanceID()
+	allTokens := tokensByInstanceID[t.instanceID]
+	slices.Sort(allTokens)
+	return allTokens
+}
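generateTokensByInstanceID below leans on the notion of an instance's registered ownership: the sum, over its tokens, of the ring distance from each token's predecessor. Here is a standalone sketch of that computation. tokenDistance is copied verbatim from the ring/util.go change later in this diff, while instanceOwnership is an illustrative wrapper around the same loop used for firstInstanceOwnership:

package main

import (
	"fmt"
	"math"
)

// tokenDistance is copied from the ring/util.go hunk below: the ring distance
// from 'from' to 'to', wrapping around the uint32 token space.
func tokenDistance(from, to uint32) int64 {
	if from < to {
		return int64(to - from)
	}
	// the trailing +1 is needed to ensure that token 0 is counted
	return math.MaxUint32 - int64(from) + int64(to) + 1
}

// instanceOwnership sums, over a sorted token slice, the distance between each
// token and its predecessor, i.e., the loop that computes firstInstanceOwnership.
func instanceOwnership(tokens []uint32) float64 {
	ownership := 0.0
	prev := len(tokens) - 1
	for i, token := range tokens {
		ownership += float64(tokenDistance(tokens[prev], token))
		prev = i
	}
	return ownership
}

func main() {
	// A lone instance with four evenly spaced tokens owns the entire ring.
	tokens := []uint32{0, 1 << 30, 1 << 31, 3 << 30}
	fmt.Println(instanceOwnership(tokens)) // 4.294967296e+09, the full token space
}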
+// generateTokensByInstanceID generates the optimal number of tokens (optimalTokensPerInstance),
+// i.e., 512, for all instances whose id is less than or equal to the id of the underlying instance
+// (with id t.instanceID). Generated tokens are not sorted, but they are distributed in such a
+// way that the registered ownership of all the instances is optimal.
+// Calls to this method will always return the same set of tokens.
+func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]Tokens {
+	firstInstanceTokens := t.generateFirstInstanceTokens()
+	tokensByInstanceID := make(map[int]Tokens, t.instanceID+1)
+	tokensByInstanceID[0] = firstInstanceTokens
+
+	if t.instanceID == 0 {
+		return tokensByInstanceID
+	}
+
+	// tokensQueues is a slice of priority queues. Slice indexes correspond
+	// to the ids of instances, while priority queues represent the tokens
+	// of the corresponding instance, ordered from highest to lowest ownership.
+	tokensQueues := make([]ownershipPriorityQueue[ringToken], t.instanceID)
+
+	// Create and initialize the priority queue of tokens for the first instance
+	tokensQueue := newPriorityQueue[ringToken](optimalTokensPerInstance)
+	prev := len(firstInstanceTokens) - 1
+	firstInstanceOwnership := 0.0
+	for tk, token := range firstInstanceTokens {
+		tokenOwnership := float64(tokenDistance(firstInstanceTokens[prev], token))
+		firstInstanceOwnership += tokenOwnership
+		heap.Push(&tokensQueue, newRingTokenOwnershipInfo(token, firstInstanceTokens[prev]))
+		prev = tk
+	}
+	tokensQueues[0] = tokensQueue
+
+	// instanceQueue is a priority queue of instances such that instances with higher ownership have a higher priority
+	instanceQueue := newPriorityQueue[ringInstance](t.instanceID)
+	heap.Push(&instanceQueue, newRingInstanceOwnershipInfo(0, firstInstanceOwnership))
+
+	// ignoredInstances is a slice of the current instances whose tokens
+	// don't have enough space to accommodate new tokens.
+	ignoredInstances := make([]ownershipInfo[ringInstance], 0, t.instanceID)
+
+	for i := 1; i <= t.instanceID; i++ {
+		optimalInstanceOwnership := float64(totalTokensCount) / float64(i+1)
+		currInstanceOwnership := 0.0
+		addedTokens := 0
+		ignoredInstances = ignoredInstances[:0]
+		tokens := make(Tokens, 0, optimalTokensPerInstance)
+		// currInstanceTokenQueue is the priority queue of tokens of the new instance i
+		currInstanceTokenQueue := newPriorityQueue[ringToken](optimalTokensPerInstance)
+		for addedTokens < optimalTokensPerInstance {
+			optimalTokenOwnership := t.optimalTokenOwnership(optimalInstanceOwnership, currInstanceOwnership, uint32(optimalTokensPerInstance-addedTokens))
+			highestOwnershipInstance := instanceQueue.Peek()
+			if highestOwnershipInstance == nil || highestOwnershipInstance.ownership <= float64(optimalTokenOwnership) {
+				// Guard against dereferencing a nil Peek result in the log below.
+				highestOwnership := 0.0
+				if highestOwnershipInstance != nil {
+					highestOwnership = highestOwnershipInstance.ownership
+				}
+				level.Warn(t.logger).Log("msg", "it was impossible to add a token because the instance with the highest ownership cannot satisfy the request", "added tokens", addedTokens+1, "highest ownership", highestOwnership, "requested ownership", optimalTokenOwnership)
+				// if this happens, it means that we cannot accommodate other tokens, so we panic
+				err := fmt.Errorf("it was impossible to add %dth token for instance with id %d in zone %s because the instance with the highest ownership cannot satisfy the requested ownership %d", addedTokens+1, i, t.spreadMinimizingZones[t.zoneID], optimalTokenOwnership)
+				panic(err)
+			}
+			tokensQueue := tokensQueues[highestOwnershipInstance.item.instanceID]
+			highestOwnershipToken := tokensQueue.Peek()
+			if highestOwnershipToken.ownership <= float64(optimalTokenOwnership) {
+				// The token with the highest ownership of the instance with the highest ownership could not
+				// accommodate a new token, hence we ignore this instance and move on to the next one.
+				ignoredInstances = append(ignoredInstances, heap.Pop(&instanceQueue).(ownershipInfo[ringInstance]))
+				continue
+			}
+			token := highestOwnershipToken.item
+			newToken, err := t.calculateNewToken(token, optimalTokenOwnership)
+			if err != nil {
+				level.Error(t.logger).Log("msg", "it was impossible to calculate a new token because an error occurred", "err", err)
+				// if this happens, it means that we cannot accommodate additional tokens, so we panic
+				err := fmt.Errorf("it was impossible to calculate the %dth token for instance with id %d in zone %s", addedTokens+1, i, t.spreadMinimizingZones[t.zoneID])
+				panic(err)
+			}
+			tokens = append(tokens, newToken)
+			// add the new token to currInstanceTokenQueue
+			heap.Push(&currInstanceTokenQueue, newRingTokenOwnershipInfo(newToken, token.prevToken))
+
+			oldTokenOwnership := highestOwnershipToken.ownership
+			newTokenOwnership := float64(tokenDistance(newToken, token.token))
+			currInstanceOwnership += oldTokenOwnership - newTokenOwnership
+
+			// The token with the highest ownership of the instance with the highest ownership has changed,
+			// so we propagate these changes in the corresponding tokens queue.
+			highestOwnershipToken.item.prevToken = newToken
+			highestOwnershipToken.ownership = newTokenOwnership
+			heap.Fix(&tokensQueue, 0)
+
+			// The ownership of the instance with the highest ownership has changed,
+			// so we propagate these changes in the instances queue.
+			highestOwnershipInstance.ownership = highestOwnershipInstance.ownership - oldTokenOwnership + newTokenOwnership
+			heap.Fix(&instanceQueue, 0)
+
+			addedTokens++
+		}
+		tokensByInstanceID[i] = tokens
+		// if this is the last iteration we return immediately, avoiding additional heap.Push calls
+		if i == t.instanceID {
+			return tokensByInstanceID
+		}
+
+		// If there were some ignored instances, we put them back on the queue.
+		for _, ignoredInstance := range ignoredInstances {
+			heap.Push(&instanceQueue, ignoredInstance)
+		}
+
+		tokensQueues[i] = currInstanceTokenQueue
+
+		// add the current instance with the calculated ownership currInstanceOwnership to instanceQueue
+		heap.Push(&instanceQueue, newRingInstanceOwnershipInfo(i, currInstanceOwnership))
+	}
+
+	return tokensByInstanceID
+}
+
+func (t *SpreadMinimizingTokenGenerator) CanJoin(instances map[string]InstanceDesc) error {
+	if !t.canJoinEnabled {
+		return nil
+	}
+
+	prevInstance, err := previousInstance(t.instance)
+	if err != nil {
+		if errors.Is(err, errorNoPreviousInstance) {
+			return nil
+		}
+		return err
+	}
+	instanceDesc, ok := instances[prevInstance]
+	if ok && len(instanceDesc.Tokens) != 0 {
+		return nil
+	}
+	return errorMissingPreviousInstance(prevInstance)
+}
+
+func (t *SpreadMinimizingTokenGenerator) CanJoinEnabled() bool {
+	return t.canJoinEnabled
+}
diff --git a/vendor/github.com/grafana/dskit/ring/token_generator.go b/vendor/github.com/grafana/dskit/ring/token_generator.go
new file mode 100644
index 00000000..159d9ffd
--- /dev/null
+++ b/vendor/github.com/grafana/dskit/ring/token_generator.go
@@ -0,0 +1,70 @@
+package ring
+
+import (
+	"math/rand"
+	"sort"
+	"time"
+)
+
+type TokenGenerator interface {
+	// GenerateTokens generates at most requestedTokensCount unique tokens, none of which clashes with
+	// the given allTakenTokens, representing the set of all tokens currently present in the ring.
+	// Generated tokens are sorted.
+	GenerateTokens(requestedTokensCount int, allTakenTokens []uint32) Tokens
+
+	// CanJoin checks whether the instance owning this TokenGenerator can join the set of the given
+	// instances, and fails if it is not possible.
+	CanJoin(instances map[string]InstanceDesc) error
+
+	// CanJoinEnabled returns true if the instance owning this TokenGenerator should perform the CanJoin check before
+	// it tries to join the ring.
+	CanJoinEnabled() bool
+}
+
+type RandomTokenGenerator struct{}
+
+func NewRandomTokenGenerator() *RandomTokenGenerator {
+	return &RandomTokenGenerator{}
+}
+
+// GenerateTokens generates at most requestedTokensCount unique random tokens, none of which clashes with
+// the given allTakenTokens, representing the set of all tokens currently present in the ring.
+// Generated tokens are sorted.
+func (t *RandomTokenGenerator) GenerateTokens(requestedTokensCount int, allTakenTokens []uint32) Tokens {
+	if requestedTokensCount <= 0 {
+		return []uint32{}
+	}
+
+	r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+	used := make(map[uint32]bool, len(allTakenTokens))
+	for _, v := range allTakenTokens {
+		used[v] = true
+	}
+
+	tokens := make([]uint32, 0, requestedTokensCount)
+	for i := 0; i < requestedTokensCount; {
+		candidate := r.Uint32()
+		if used[candidate] {
+			continue
+		}
+		used[candidate] = true
+		tokens = append(tokens, candidate)
+		i++
+	}
+
+	// Ensure returned tokens are sorted.
+ sort.Slice(tokens, func(i, j int) bool { + return tokens[i] < tokens[j] + }) + + return tokens +} + +func (t *RandomTokenGenerator) CanJoin(_ map[string]InstanceDesc) error { + return nil +} + +func (t *RandomTokenGenerator) CanJoinEnabled() bool { + return false +} diff --git a/vendor/github.com/grafana/dskit/ring/util.go b/vendor/github.com/grafana/dskit/ring/util.go index 17eb3983..a21c0f2f 100644 --- a/vendor/github.com/grafana/dskit/ring/util.go +++ b/vendor/github.com/grafana/dskit/ring/util.go @@ -2,60 +2,25 @@ package ring import ( "context" - "fmt" - "math/rand" - "net" + "math" "sort" - "strings" "time" "github.com/go-kit/log" - "github.com/go-kit/log/level" + "golang.org/x/exp/slices" "github.com/grafana/dskit/backoff" + "github.com/grafana/dskit/netutil" ) -// GenerateTokens make numTokens unique random tokens, none of which clash -// with takenTokens. Generated tokens are sorted. -func GenerateTokens(numTokens int, takenTokens []uint32) []uint32 { - if numTokens <= 0 { - return []uint32{} - } - - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - used := make(map[uint32]bool, len(takenTokens)) - for _, v := range takenTokens { - used[v] = true - } - - tokens := make([]uint32, 0, numTokens) - for i := 0; i < numTokens; { - candidate := r.Uint32() - if used[candidate] { - continue - } - used[candidate] = true - tokens = append(tokens, candidate) - i++ - } - - // Ensure returned tokens are sorted. - sort.Slice(tokens, func(i, j int) bool { - return tokens[i] < tokens[j] - }) - - return tokens -} - // GetInstanceAddr returns the address to use to register the instance // in the ring. -func GetInstanceAddr(configAddr string, netInterfaces []string, logger log.Logger) (string, error) { +func GetInstanceAddr(configAddr string, netInterfaces []string, logger log.Logger, enableInet6 bool) (string, error) { if configAddr != "" { return configAddr, nil } - addr, err := getFirstAddressOf(netInterfaces, logger) + addr, err := netutil.GetFirstAddressOf(netInterfaces, logger, enableInet6) if err != nil { return "", err } @@ -163,62 +128,23 @@ func getZones(tokens map[string][]uint32) []string { // searchToken returns the offset of the tokens entry holding the range for the provided key. func searchToken(tokens []uint32, key uint32) int { - i := sort.Search(len(tokens), func(x int) bool { - return tokens[x] > key - }) + i, found := slices.BinarySearch(tokens, key) + if found { + // we want the first token > key, not >= key + i = i + 1 + } if i >= len(tokens) { i = 0 } return i } -// GetFirstAddressOf returns the first IPv4 address of the supplied interface names, omitting any 169.254.x.x automatic private IPs if possible. 
-func getFirstAddressOf(names []string, logger log.Logger) (string, error) { - var ipAddr string - for _, name := range names { - inf, err := net.InterfaceByName(name) - if err != nil { - level.Warn(logger).Log("msg", "error getting interface", "inf", name, "err", err) - continue - } - addrs, err := inf.Addrs() - if err != nil { - level.Warn(logger).Log("msg", "error getting addresses for interface", "inf", name, "err", err) - continue - } - if len(addrs) <= 0 { - level.Warn(logger).Log("msg", "no addresses found for interface", "inf", name, "err", err) - continue - } - if ip := filterIPs(addrs); ip != "" { - ipAddr = ip - } - if strings.HasPrefix(ipAddr, `169.254.`) || ipAddr == "" { - continue - } - return ipAddr, nil - } - if ipAddr == "" { - return "", fmt.Errorf("No address found for %s", names) - } - if strings.HasPrefix(ipAddr, `169.254.`) { - level.Warn(logger).Log("msg", "using automatic private ip", "address", ipAddr) - } - return ipAddr, nil -} - -// filterIPs attempts to return the first non automatic private IP (APIPA / 169.254.x.x) if possible, only returning APIPA if available and no other valid IP is found. -func filterIPs(addrs []net.Addr) string { - var ipAddr string - for _, addr := range addrs { - if v, ok := addr.(*net.IPNet); ok { - if ip := v.IP.To4(); ip != nil { - ipAddr = v.IP.String() - if !strings.HasPrefix(ipAddr, `169.254.`) { - return ipAddr - } - } - } +// tokenDistance returns the distance between the given tokens from and to. +// The distance between a token and itself is the whole ring, i.e., math.MaxUint32 + 1. +func tokenDistance(from, to uint32) int64 { + if from < to { + return int64(to - from) } - return ipAddr + // the trailing +1 is needed to ensure that token 0 is counted + return math.MaxUint32 - int64(from) + int64(to) + 1 } diff --git a/vendor/github.com/weaveworks/common/server/fake_server.pb.go b/vendor/github.com/grafana/dskit/server/fake_server.pb.go similarity index 53% rename from vendor/github.com/weaveworks/common/server/fake_server.pb.go rename to vendor/github.com/grafana/dskit/server/fake_server.pb.go index 4fafbb2f..75ee6b0a 100644 --- a/vendor/github.com/weaveworks/common/server/fake_server.pb.go +++ b/vendor/github.com/grafana/dskit/server/fake_server.pb.go @@ -1,17 +1,21 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: server/fake_server.proto +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: fake_server.proto package server import ( context "context" fmt "fmt" - proto "github.com/golang/protobuf/proto" + proto "github.com/gogo/protobuf/proto" empty "github.com/golang/protobuf/ptypes/empty" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" + io "io" math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -23,33 +27,37 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type FailWithHTTPErrorRequest struct { - Code int32 `protobuf:"varint,1,opt,name=Code,proto3" json:"Code,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Code int32 `protobuf:"varint,1,opt,name=Code,proto3" json:"Code,omitempty"` } -func (m *FailWithHTTPErrorRequest) Reset() { *m = FailWithHTTPErrorRequest{} } -func (m *FailWithHTTPErrorRequest) String() string { return proto.CompactTextString(m) } -func (*FailWithHTTPErrorRequest) ProtoMessage() {} +func (m *FailWithHTTPErrorRequest) Reset() { *m = FailWithHTTPErrorRequest{} } +func (*FailWithHTTPErrorRequest) ProtoMessage() {} func (*FailWithHTTPErrorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2267b95bf2c64cdc, []int{0} + return fileDescriptor_a932e7b7b9f5c118, []int{0} } - func (m *FailWithHTTPErrorRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FailWithHTTPErrorRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *FailWithHTTPErrorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FailWithHTTPErrorRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_FailWithHTTPErrorRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *FailWithHTTPErrorRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_FailWithHTTPErrorRequest.Merge(m, src) } func (m *FailWithHTTPErrorRequest) XXX_Size() int { - return xxx_messageInfo_FailWithHTTPErrorRequest.Size(m) + return m.Size() } func (m *FailWithHTTPErrorRequest) XXX_DiscardUnknown() { xxx_messageInfo_FailWithHTTPErrorRequest.DiscardUnknown(m) @@ -68,26 +76,70 @@ func init() { proto.RegisterType((*FailWithHTTPErrorRequest)(nil), "server.FailWithHTTPErrorRequest") } -func init() { proto.RegisterFile("server/fake_server.proto", fileDescriptor_2267b95bf2c64cdc) } - -var fileDescriptor_2267b95bf2c64cdc = []byte{ - // 246 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x28, 0x4e, 0x2d, 0x2a, - 0x4b, 0x2d, 0xd2, 0x4f, 0x4b, 0xcc, 0x4e, 0x8d, 0x87, 0xb0, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, - 0x85, 0xd8, 0x20, 0x3c, 0x29, 0xe9, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0x7d, 0xb0, 0x68, 0x52, - 0x69, 0x9a, 0x7e, 0x6a, 0x6e, 0x41, 0x49, 0x25, 0x44, 0x91, 0x92, 0x1e, 0x97, 0x84, 0x5b, 0x62, - 0x66, 0x4e, 0x78, 0x66, 0x49, 0x86, 0x47, 0x48, 0x48, 0x80, 0x6b, 0x51, 0x51, 0x7e, 0x51, 0x50, - 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x90, 0x10, 0x17, 0x8b, 0x73, 0x7e, 0x4a, 0xaa, 0x04, 0xa3, - 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x98, 0x6d, 0x74, 0x9b, 0x89, 0x8b, 0xcb, 0x2d, 0x31, 0x3b, 0x35, - 0x18, 0x6c, 0xb6, 0x90, 0x35, 0x17, 0x7b, 0x70, 0x69, 0x72, 0x72, 0x6a, 0x6a, 0x8a, 0x90, 0x98, - 0x1e, 0xc4, 0x1e, 0x3d, 0x98, 0x3d, 0x7a, 0xae, 0x20, 0x7b, 0xa4, 0x70, 0x88, 0x2b, 0x31, 0x08, - 0x39, 0x72, 0xf1, 0xc2, 0xec, 0x06, 0xdb, 0x4b, 0x86, 0x11, 0xfe, 0x5c, 0x82, 0x18, 0xce, 0x17, - 0x52, 0xd0, 0x83, 0x86, 0x03, 0x2e, 0x9f, 0xe1, 0x31, 0xd0, 0x92, 0x8b, 0x35, 0x38, 0x27, 0x35, - 0xb5, 0x80, 0x2c, 0xef, 0x70, 0x07, 0x97, 0x14, 0xa5, 0x26, 0xe6, 0x92, 0x69, 0x80, 0x01, 0xa3, - 0x93, 0x6a, 0x94, 0x72, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x79, - 0x6a, 0x62, 0x59, 0x6a, 0x79, 0x7e, 
0x51, 0x76, 0xb1, 0x7e, 0x72, 0x7e, 0x6e, 0x6e, 0x7e, 0x9e, - 0x3e, 0xc4, 0x5f, 0x49, 0x6c, 0x60, 0xad, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xcd, - 0xa4, 0xf9, 0xfc, 0x01, 0x00, 0x00, +func init() { proto.RegisterFile("fake_server.proto", fileDescriptor_a932e7b7b9f5c118) } + +var fileDescriptor_a932e7b7b9f5c118 = []byte{ + // 265 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x4b, 0xcc, 0x4e, + 0x8d, 0x2f, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83, + 0xf0, 0xa4, 0xa4, 0xd3, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xc1, 0xa2, 0x49, 0xa5, 0x69, 0xfa, + 0xa9, 0xb9, 0x05, 0x25, 0x95, 0x10, 0x45, 0x4a, 0x7a, 0x5c, 0x12, 0x6e, 0x89, 0x99, 0x39, 0xe1, + 0x99, 0x25, 0x19, 0x1e, 0x21, 0x21, 0x01, 0xae, 0x45, 0x45, 0xf9, 0x45, 0x41, 0xa9, 0x85, 0xa5, + 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0xce, 0xf9, 0x29, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, + 0xac, 0x41, 0x60, 0xb6, 0xd1, 0x6d, 0x26, 0x2e, 0x2e, 0xb7, 0xc4, 0xec, 0xd4, 0x60, 0xb0, 0xd9, + 0x42, 0xd6, 0x5c, 0xec, 0xc1, 0xa5, 0xc9, 0xc9, 0xa9, 0xa9, 0x29, 0x42, 0x62, 0x7a, 0x10, 0x7b, + 0xf4, 0x60, 0xf6, 0xe8, 0xb9, 0x82, 0xec, 0x91, 0xc2, 0x21, 0xae, 0xc4, 0x20, 0xe4, 0xc8, 0xc5, + 0x0b, 0xb3, 0x1b, 0x6c, 0x2f, 0x19, 0x46, 0xf8, 0x73, 0x09, 0x62, 0x38, 0x5f, 0x48, 0x41, 0x0f, + 0x1a, 0x0e, 0xb8, 0x7c, 0x86, 0xc7, 0x40, 0x4b, 0x2e, 0xd6, 0xe0, 0x9c, 0xd4, 0xd4, 0x02, 0xb2, + 0xbc, 0xc3, 0x1d, 0x5c, 0x52, 0x94, 0x9a, 0x98, 0x4b, 0xa6, 0x01, 0x06, 0x8c, 0x4e, 0x26, 0x17, + 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, + 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, + 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, + 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0x92, 0xd8, 0xc0, 0x26, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x43, 0x2b, 0x71, 0x6d, 0x04, 0x02, 0x00, 0x00, +} + +func (this *FailWithHTTPErrorRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FailWithHTTPErrorRequest) + if !ok { + that2, ok := that.(FailWithHTTPErrorRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Code != that1.Code { + return false + } + return true +} +func (this *FailWithHTTPErrorRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&server.FailWithHTTPErrorRequest{") + s = append(s, "Code: "+fmt.Sprintf("%#v", this.Code)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFakeServer(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } // Reference imports to suppress errors if they are not otherwise used. 
@@ -339,5 +391,263 @@ var _FakeServer_serviceDesc = grpc.ServiceDesc{ ServerStreams: true, }, }, - Metadata: "server/fake_server.proto", + Metadata: "fake_server.proto", +} + +func (m *FailWithHTTPErrorRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FailWithHTTPErrorRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FailWithHTTPErrorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Code != 0 { + i = encodeVarintFakeServer(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintFakeServer(dAtA []byte, offset int, v uint64) int { + offset -= sovFakeServer(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *FailWithHTTPErrorRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovFakeServer(uint64(m.Code)) + } + return n +} + +func sovFakeServer(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFakeServer(x uint64) (n int) { + return sovFakeServer(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *FailWithHTTPErrorRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FailWithHTTPErrorRequest{`, + `Code:` + fmt.Sprintf("%v", this.Code) + `,`, + `}`, + }, "") + return s +} +func valueToStringFakeServer(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FailWithHTTPErrorRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFakeServer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FailWithHTTPErrorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FailWithHTTPErrorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFakeServer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipFakeServer(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFakeServer + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFakeServer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFakeServer(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFakeServer + } 
+ if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFakeServer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFakeServer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFakeServer + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthFakeServer + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFakeServer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipFakeServer(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthFakeServer + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthFakeServer = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFakeServer = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/weaveworks/common/server/fake_server.proto b/vendor/github.com/grafana/dskit/server/fake_server.proto similarity index 90% rename from vendor/github.com/weaveworks/common/server/fake_server.proto rename to vendor/github.com/grafana/dskit/server/fake_server.proto index bbf39381..248a6f24 100644 --- a/vendor/github.com/weaveworks/common/server/fake_server.proto +++ b/vendor/github.com/grafana/dskit/server/fake_server.proto @@ -2,8 +2,6 @@ syntax = "proto3"; package server; -option go_package = "github.com/weaveworks/common/server"; - import "google/protobuf/empty.proto"; service FakeServer { diff --git a/vendor/github.com/grafana/dskit/server/limits.go b/vendor/github.com/grafana/dskit/server/limits.go new file mode 100644 index 00000000..4a8651e3 --- /dev/null +++ b/vendor/github.com/grafana/dskit/server/limits.go @@ -0,0 +1,79 @@ +package server + +import ( + "context" + "strings" + + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/tap" +) + +type GrpcInflightMethodLimiter interface { + // RPCCallStarting is called before request has been read into memory. + // All that's known about the request at this point is grpc method name. + // + // Returned context is used during the remainder of the gRPC call. + // + // Returned error should be convertible to gRPC Status via status.FromError, + // otherwise gRPC-server implementation-specific error will be returned to the client (codes.PermissionDenied in grpc@v1.55.0). 
+ RPCCallStarting(ctx context.Context, methodName string, md metadata.MD) (context.Context, error) + + RPCCallFinished(ctx context.Context) +} + +func newGrpcInflightLimitCheck(methodLimiter GrpcInflightMethodLimiter) *grpcInflightLimitCheck { + return &grpcInflightLimitCheck{ + methodLimiter: methodLimiter, + } +} + +// grpcInflightLimitCheck implements gRPC TapHandle and gRPC stats.Handler. +// grpcInflightLimitCheck can track inflight requests, and reject requests before even reading them into memory. +type grpcInflightLimitCheck struct { + methodLimiter GrpcInflightMethodLimiter +} + +// TapHandle is called after receiving the grpc request and headers, but before reading any request data. +// If we reject the request here (by returning a non-nil error), it won't be counted towards any metrics (e.g. in middleware.grpcStatsHandler). +// If we accept the request (no error), HandleRPC with a stats.End notification will eventually be called. +func (g *grpcInflightLimitCheck) TapHandle(ctx context.Context, info *tap.Info) (context.Context, error) { + if !isMethodNameValid(info.FullMethodName) { + // If the method name is not valid, we let the request continue, but we don't call the method limiter. + // Otherwise, we would not be able to call the method limiter again when the call finishes, because in this case the grpc server will not call the stats handler. + return ctx, nil + } + + return g.methodLimiter.RPCCallStarting(ctx, info.FullMethodName, info.Header) +} + +func (g *grpcInflightLimitCheck) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { + return ctx +} + +func (g *grpcInflightLimitCheck) HandleRPC(ctx context.Context, rpcStats stats.RPCStats) { + // When the request ends, and we started "inflight" request tracking for it, finish it. + if _, ok := rpcStats.(*stats.End); !ok { + return + } + + g.methodLimiter.RPCCallFinished(ctx) +} + +func (g *grpcInflightLimitCheck) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { + return ctx +} + +func (g *grpcInflightLimitCheck) HandleConn(_ context.Context, _ stats.ConnStats) { + // Not interested. +} + +// This function mimics the check in the grpc library, server.go, handleStream method. handleStream can stop processing early, +// without calling the stats handler, if the method name is invalid. +func isMethodNameValid(method string) bool { + if method != "" && method[0] == '/' { + method = method[1:] + } + pos := strings.LastIndex(method, "/") + return pos >= 0 +} diff --git a/vendor/github.com/grafana/dskit/server/metrics.go b/vendor/github.com/grafana/dskit/server/metrics.go new file mode 100644 index 00000000..aa1c3e53 --- /dev/null +++ b/vendor/github.com/grafana/dskit/server/metrics.go @@ -0,0 +1,67 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/server/metrics.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd.
+ +package server + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/grafana/dskit/instrument" + "github.com/grafana/dskit/middleware" +) + +type Metrics struct { + TCPConnections *prometheus.GaugeVec + TCPConnectionsLimit *prometheus.GaugeVec + RequestDuration *prometheus.HistogramVec + ReceivedMessageSize *prometheus.HistogramVec + SentMessageSize *prometheus.HistogramVec + InflightRequests *prometheus.GaugeVec +} + +func NewServerMetrics(cfg Config) *Metrics { + reg := promauto.With(cfg.registererOrDefault()) + + return &Metrics{ + TCPConnections: reg.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: cfg.MetricsNamespace, + Name: "tcp_connections", + Help: "Current number of accepted TCP connections.", + }, []string{"protocol"}), + TCPConnectionsLimit: reg.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: cfg.MetricsNamespace, + Name: "tcp_connections_limit", + Help: "The max number of TCP connections that can be accepted (0 means no limit).", + }, []string{"protocol"}), + RequestDuration: reg.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: cfg.MetricsNamespace, + Name: "request_duration_seconds", + Help: "Time (in seconds) spent serving HTTP requests.", + Buckets: instrument.DefBuckets, + NativeHistogramBucketFactor: cfg.MetricsNativeHistogramFactor, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: time.Hour, + }, []string{"method", "route", "status_code", "ws"}), + ReceivedMessageSize: reg.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: cfg.MetricsNamespace, + Name: "request_message_bytes", + Help: "Size (in bytes) of messages received in the request.", + Buckets: middleware.BodySizeBuckets, + }, []string{"method", "route"}), + SentMessageSize: reg.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: cfg.MetricsNamespace, + Name: "response_message_bytes", + Help: "Size (in bytes) of messages sent in response.", + Buckets: middleware.BodySizeBuckets, + }, []string{"method", "route"}), + InflightRequests: reg.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: cfg.MetricsNamespace, + Name: "inflight_requests", + Help: "Current number of inflight requests.", + }, []string{"method", "route"}), + } +} diff --git a/vendor/github.com/weaveworks/common/server/server.go b/vendor/github.com/grafana/dskit/server/server.go similarity index 59% rename from vendor/github.com/weaveworks/common/server/server.go rename to vendor/github.com/grafana/dskit/server/server.go index 89c99dd0..2b54283d 100644 --- a/vendor/github.com/weaveworks/common/server/server.go +++ b/vendor/github.com/grafana/dskit/server/server.go @@ -1,6 +1,11 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/server/server.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. 
+ package server import ( + "context" "crypto/tls" "flag" "fmt" @@ -8,27 +13,30 @@ import ( "net" "net/http" _ "net/http/pprof" // anonymous import to get the pprof handler registered + "strconv" "strings" "time" + gokit_log "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/gorilla/mux" otgrpc "github.com/opentracing-contrib/go-grpc" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/common/config" "github.com/prometheus/exporter-toolkit/web" - "golang.org/x/net/context" + "github.com/soheilhy/cmux" "golang.org/x/net/netutil" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" - "github.com/weaveworks/common/httpgrpc" - httpgrpc_server "github.com/weaveworks/common/httpgrpc/server" - "github.com/weaveworks/common/instrument" - "github.com/weaveworks/common/logging" - "github.com/weaveworks/common/middleware" - "github.com/weaveworks/common/signals" + "github.com/grafana/dskit/httpgrpc" + httpgrpc_server "github.com/grafana/dskit/httpgrpc/server" + "github.com/grafana/dskit/log" + "github.com/grafana/dskit/middleware" + "github.com/grafana/dskit/signals" ) // Listen on the named network @@ -52,10 +60,13 @@ type SignalHandler interface { // TLSConfig contains TLS parameters for Config. type TLSConfig struct { - TLSCertPath string `yaml:"cert_file"` - TLSKeyPath string `yaml:"key_file"` - ClientAuth string `yaml:"client_auth_type"` - ClientCAs string `yaml:"client_ca_file"` + TLSCert string `yaml:"cert" doc:"description=Server TLS certificate. This configuration parameter is YAML only."` + TLSKey config.Secret `yaml:"key" doc:"description=Server TLS key. This configuration parameter is YAML only."` + ClientCAsText string `yaml:"client_ca" doc:"description=Root certificate authority used to verify client certificates. 
This configuration parameter is YAML only."` + TLSCertPath string `yaml:"cert_file"` + TLSKeyPath string `yaml:"key_file"` + ClientAuth string `yaml:"client_auth_type"` + ClientCAs string `yaml:"client_ca_file"` } // Config for a Server @@ -81,25 +92,30 @@ type Config struct { HTTPTLSConfig TLSConfig `yaml:"http_tls_config"` GRPCTLSConfig TLSConfig `yaml:"grpc_tls_config"` - RegisterInstrumentation bool `yaml:"register_instrumentation"` - ExcludeRequestInLog bool `yaml:"-"` - DisableRequestSuccessLog bool `yaml:"-"` + RegisterInstrumentation bool `yaml:"register_instrumentation"` + ReportGRPCCodesInInstrumentationLabel bool `yaml:"report_grpc_codes_in_instrumentation_label_enabled"` + ExcludeRequestInLog bool `yaml:"-"` + DisableRequestSuccessLog bool `yaml:"-"` ServerGracefulShutdownTimeout time.Duration `yaml:"graceful_shutdown_timeout"` HTTPServerReadTimeout time.Duration `yaml:"http_server_read_timeout"` + HTTPServerReadHeaderTimeout time.Duration `yaml:"http_server_read_header_timeout"` HTTPServerWriteTimeout time.Duration `yaml:"http_server_write_timeout"` HTTPServerIdleTimeout time.Duration `yaml:"http_server_idle_timeout"` + HTTPLogClosedConnectionsWithoutResponse bool `yaml:"http_log_closed_connections_without_response_enabled"` + GRPCOptions []grpc.ServerOption `yaml:"-"` GRPCMiddleware []grpc.UnaryServerInterceptor `yaml:"-"` GRPCStreamMiddleware []grpc.StreamServerInterceptor `yaml:"-"` HTTPMiddleware []middleware.Interface `yaml:"-"` Router *mux.Router `yaml:"-"` DoNotAddDefaultHTTPMiddleware bool `yaml:"-"` + RouteHTTPToGRPC bool `yaml:"-"` - GPRCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` + GRPCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` GRPCServerMaxSendMsgSize int `yaml:"grpc_server_max_send_msg_size"` - GPRCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"` + GRPCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"` GRPCServerMaxConnectionIdle time.Duration `yaml:"grpc_server_max_connection_idle"` GRPCServerMaxConnectionAge time.Duration `yaml:"grpc_server_max_connection_age"` GRPCServerMaxConnectionAgeGrace time.Duration `yaml:"grpc_server_max_connection_age_grace"` @@ -107,16 +123,17 @@ type Config struct { GRPCServerTimeout time.Duration `yaml:"grpc_server_keepalive_timeout"` GRPCServerMinTimeBetweenPings time.Duration `yaml:"grpc_server_min_time_between_pings"` GRPCServerPingWithoutStreamAllowed bool `yaml:"grpc_server_ping_without_stream_allowed"` - - LogFormat logging.Format `yaml:"log_format"` - LogLevel logging.Level `yaml:"log_level"` - Log logging.Interface `yaml:"-"` - LogSourceIPs bool `yaml:"log_source_ips_enabled"` - LogSourceIPsHeader string `yaml:"log_source_ips_header"` - LogSourceIPsRegex string `yaml:"log_source_ips_regex"` - LogRequestHeaders bool `yaml:"log_request_headers"` - LogRequestAtInfoLevel bool `yaml:"log_request_at_info_level_enabled"` - LogRequestExcludeHeadersList string `yaml:"log_request_exclude_headers_list"` + GRPCServerNumWorkers int `yaml:"grpc_server_num_workers"` + + LogFormat string `yaml:"log_format"` + LogLevel log.Level `yaml:"log_level"` + Log gokit_log.Logger `yaml:"-"` + LogSourceIPs bool `yaml:"log_source_ips_enabled"` + LogSourceIPsHeader string `yaml:"log_source_ips_header"` + LogSourceIPsRegex string `yaml:"log_source_ips_regex"` + LogRequestHeaders bool `yaml:"log_request_headers"` + LogRequestAtInfoLevel bool `yaml:"log_request_at_info_level_enabled"` + LogRequestExcludeHeadersList string `yaml:"log_request_exclude_headers_list"` // 
If not set, default signal handler is used. SignalHandler SignalHandler `yaml:"-"` @@ -126,6 +143,9 @@ type Config struct { Gatherer prometheus.Gatherer `yaml:"-"` PathPrefix string `yaml:"http_path_prefix"` + + // This limiter is called for every started and finished gRPC request. + GrpcMethodLimiter GrpcInflightMethodLimiter `yaml:"-"` } var infinty = time.Duration(math.MaxInt64) @@ -151,13 +171,16 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.GRPCListenPort, "server.grpc-listen-port", 9095, "gRPC server listen port.") f.IntVar(&cfg.GRPCConnLimit, "server.grpc-conn-limit", 0, "Maximum number of simultaneous grpc connections, <=0 to disable") f.BoolVar(&cfg.RegisterInstrumentation, "server.register-instrumentation", true, "Register the intrumentation handlers (/metrics etc).") + f.BoolVar(&cfg.ReportGRPCCodesInInstrumentationLabel, "server.report-grpc-codes-in-instrumentation-label-enabled", false, "If set to true, gRPC statuses will be reported in instrumentation labels with their string representations. Otherwise, they will be reported as \"error\".") f.DurationVar(&cfg.ServerGracefulShutdownTimeout, "server.graceful-shutdown-timeout", 30*time.Second, "Timeout for graceful shutdowns") - f.DurationVar(&cfg.HTTPServerReadTimeout, "server.http-read-timeout", 30*time.Second, "Read timeout for HTTP server") + f.DurationVar(&cfg.HTTPServerReadTimeout, "server.http-read-timeout", 30*time.Second, "Read timeout for entire HTTP request, including headers and body.") + f.DurationVar(&cfg.HTTPServerReadHeaderTimeout, "server.http-read-header-timeout", 0, "Read timeout for HTTP request headers. If set to 0, value of -server.http-read-timeout is used.") f.DurationVar(&cfg.HTTPServerWriteTimeout, "server.http-write-timeout", 30*time.Second, "Write timeout for HTTP server") f.DurationVar(&cfg.HTTPServerIdleTimeout, "server.http-idle-timeout", 120*time.Second, "Idle timeout for HTTP server") - f.IntVar(&cfg.GPRCServerMaxRecvMsgSize, "server.grpc-max-recv-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can receive (bytes).") + f.BoolVar(&cfg.HTTPLogClosedConnectionsWithoutResponse, "server.http-log-closed-connections-without-response-enabled", false, "Log closed connections that did not receive any response, most likely because client didn't send any request within timeout.") + f.IntVar(&cfg.GRPCServerMaxRecvMsgSize, "server.grpc-max-recv-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can receive (bytes).") f.IntVar(&cfg.GRPCServerMaxSendMsgSize, "server.grpc-max-send-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can send (bytes).") - f.UintVar(&cfg.GPRCServerMaxConcurrentStreams, "server.grpc-max-concurrent-streams", 100, "Limit on the number of concurrent streams for gRPC calls (0 = unlimited)") + f.UintVar(&cfg.GRPCServerMaxConcurrentStreams, "server.grpc-max-concurrent-streams", 100, "Limit on the number of concurrent streams for gRPC calls per client connection (0 = unlimited)") f.DurationVar(&cfg.GRPCServerMaxConnectionIdle, "server.grpc.keepalive.max-connection-idle", infinty, "The duration after which an idle connection should be closed. Default: infinity") f.DurationVar(&cfg.GRPCServerMaxConnectionAge, "server.grpc.keepalive.max-connection-age", infinty, "The duration for the maximum amount of time a connection may exist before it will be closed. 
Default: infinity") f.DurationVar(&cfg.GRPCServerMaxConnectionAgeGrace, "server.grpc.keepalive.max-connection-age-grace", infinty, "An additive period after max-connection-age after which the connection will be forcibly closed. Default: infinity") @@ -165,13 +188,24 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.GRPCServerTimeout, "server.grpc.keepalive.timeout", time.Second*20, "After having pinged for keepalive check, the duration after which an idle connection should be closed. Default: 20s") f.DurationVar(&cfg.GRPCServerMinTimeBetweenPings, "server.grpc.keepalive.min-time-between-pings", 5*time.Minute, "Minimum amount of time a client should wait before sending a keepalive ping. If client sends keepalive ping more often, server will send GOAWAY and close the connection.") f.BoolVar(&cfg.GRPCServerPingWithoutStreamAllowed, "server.grpc.keepalive.ping-without-stream-allowed", false, "If true, server allows keepalive pings even when there are no active streams(RPCs). If false, and client sends ping when there are no active streams, server will send GOAWAY and close the connection.") + f.IntVar(&cfg.GRPCServerNumWorkers, "server.grpc.num-workers", 0, "If non-zero, configures the amount of GRPC server workers used to serve the requests.") f.StringVar(&cfg.PathPrefix, "server.path-prefix", "", "Base path to serve all API routes from (e.g. /v1/)") - cfg.LogFormat.RegisterFlags(f) + f.StringVar(&cfg.LogFormat, "log.format", log.LogfmtFormat, "Output log messages in the given format. Valid formats: [logfmt, json]") cfg.LogLevel.RegisterFlags(f) f.BoolVar(&cfg.LogSourceIPs, "server.log-source-ips-enabled", false, "Optionally log the source IPs.") f.StringVar(&cfg.LogSourceIPsHeader, "server.log-source-ips-header", "", "Header field storing the source IPs. Only used if server.log-source-ips-enabled is true. If not set the default Forwarded, X-Real-IP and X-Forwarded-For headers are used") f.StringVar(&cfg.LogSourceIPsRegex, "server.log-source-ips-regex", "", "Regex for matching the source IPs. Only used if server.log-source-ips-enabled is true. If not set the default Forwarded, X-Real-IP and X-Forwarded-For headers are used") - f.BoolVar(&cfg.LogRequestAtInfoLevel, "server.log-request-at-info-level-enabled", false, "Optionally log requests at info level instead of debug level.") + f.BoolVar(&cfg.LogRequestHeaders, "server.log-request-headers", false, "Optionally log request headers.") + f.StringVar(&cfg.LogRequestExcludeHeadersList, "server.log-request-headers-exclude-list", "", "Comma separated list of headers to exclude from logging. Only used if server.log-request-headers is true.") + f.BoolVar(&cfg.LogRequestAtInfoLevel, "server.log-request-at-info-level-enabled", false, "Optionally log requests at info level instead of debug level. Applies to request headers as well if server.log-request-headers is enabled.") +} + +func (cfg *Config) registererOrDefault() prometheus.Registerer { + // If user doesn't supply a Registerer/gatherer, use Prometheus' by default. + if cfg.Registerer != nil { + return cfg.Registerer + } + return prometheus.DefaultRegisterer } // Server wraps a HTTP and gRPC server, and some common initialization. @@ -183,74 +217,84 @@ type Server struct { grpcListener net.Listener httpListener net.Listener + // These fields are used to support grpc over the http server + // if RouteHTTPToGRPC is set.
the fields are kept here + // so they can be initialized in New() and started in Run() + grpchttpmux cmux.CMux + grpcOnHTTPListener net.Listener + GRPCOnHTTPServer *grpc.Server + HTTP *mux.Router HTTPServer *http.Server GRPC *grpc.Server - Log logging.Interface + Log gokit_log.Logger Registerer prometheus.Registerer Gatherer prometheus.Gatherer } -// New makes a new Server +// New makes a new Server. It will panic if the metrics cannot be registered. func New(cfg Config) (*Server, error) { - // If user doesn't supply a logging implementation, by default instantiate - // logrus. - log := cfg.Log - if log == nil { - log = logging.NewLogrus(cfg.LogLevel) - } + metrics := NewServerMetrics(cfg) + return newServer(cfg, metrics) +} + +// NewWithMetrics makes a new Server using the provided Metrics. It will not attempt to register the metrics, +// the user is responsible for doing so. +func NewWithMetrics(cfg Config, metrics *Metrics) (*Server, error) { + return newServer(cfg, metrics) +} - // If user doesn't supply a registerer/gatherer, use Prometheus' by default. - reg := cfg.Registerer - if reg == nil { - reg = prometheus.DefaultRegisterer +func newServer(cfg Config, metrics *Metrics) (*Server, error) { + // If user doesn't supply a logging implementation, by default instantiate go-kit. + logger := cfg.Log + if logger == nil { + logger = log.NewGoKitWithLevel(cfg.LogLevel, cfg.LogFormat) } + gatherer := cfg.Gatherer if gatherer == nil { gatherer = prometheus.DefaultGatherer } - tcpConnections := prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: cfg.MetricsNamespace, - Name: "tcp_connections", - Help: "Current number of accepted TCP connections.", - }, []string{"protocol"}) - reg.MustRegister(tcpConnections) - - tcpConnectionsLimit := prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: cfg.MetricsNamespace, - Name: "tcp_connections_limit", - Help: "The max number of TCP connections that can be accepted (0 means no limit).", - }, []string{"protocol"}) - reg.MustRegister(tcpConnectionsLimit) - network := cfg.HTTPListenNetwork if network == "" { network = DefaultNetwork } // Setup listeners first, so we can fail early if the port is in use. 
- httpListener, err := net.Listen(network, fmt.Sprintf("%s:%d", cfg.HTTPListenAddress, cfg.HTTPListenPort)) + httpListener, err := net.Listen(network, net.JoinHostPort(cfg.HTTPListenAddress, strconv.Itoa(cfg.HTTPListenPort))) if err != nil { return nil, err } - httpListener = middleware.CountingListener(httpListener, tcpConnections.WithLabelValues("http")) + httpListener = middleware.CountingListener(httpListener, metrics.TCPConnections.WithLabelValues("http")) + if cfg.HTTPLogClosedConnectionsWithoutResponse { + httpListener = middleware.NewZeroResponseListener(httpListener, level.Warn(logger)) + } - tcpConnectionsLimit.WithLabelValues("http").Set(float64(cfg.HTTPConnLimit)) + metrics.TCPConnectionsLimit.WithLabelValues("http").Set(float64(cfg.HTTPConnLimit)) if cfg.HTTPConnLimit > 0 { httpListener = netutil.LimitListener(httpListener, cfg.HTTPConnLimit) } + var grpcOnHTTPListener net.Listener + var grpchttpmux cmux.CMux + if cfg.RouteHTTPToGRPC { + grpchttpmux = cmux.New(httpListener) + + httpListener = grpchttpmux.Match(cmux.HTTP1Fast("PATCH")) + grpcOnHTTPListener = grpchttpmux.Match(cmux.HTTP2()) + } + network = cfg.GRPCListenNetwork if network == "" { network = DefaultNetwork } - grpcListener, err := net.Listen(network, fmt.Sprintf("%s:%d", cfg.GRPCListenAddress, cfg.GRPCListenPort)) + grpcListener, err := net.Listen(network, net.JoinHostPort(cfg.GRPCListenAddress, strconv.Itoa(cfg.GRPCListenPort))) if err != nil { return nil, err } - grpcListener = middleware.CountingListener(grpcListener, tcpConnections.WithLabelValues("grpc")) + grpcListener = middleware.CountingListener(grpcListener, metrics.TCPConnections.WithLabelValues("grpc")) - tcpConnectionsLimit.WithLabelValues("grpc").Set(float64(cfg.GRPCConnLimit)) + metrics.TCPConnectionsLimit.WithLabelValues("grpc").Set(float64(cfg.GRPCConnLimit)) if cfg.GRPCConnLimit > 0 { grpcListener = netutil.LimitListener(grpcListener, cfg.GRPCConnLimit) } @@ -266,90 +310,67 @@ func New(cfg Config) (*Server, error) { // Setup TLS var httpTLSConfig *tls.Config - if len(cfg.HTTPTLSConfig.TLSCertPath) > 0 && len(cfg.HTTPTLSConfig.TLSKeyPath) > 0 { + if (len(cfg.HTTPTLSConfig.TLSCertPath) > 0 || len(cfg.HTTPTLSConfig.TLSCert) > 0) && + (len(cfg.HTTPTLSConfig.TLSKeyPath) > 0 || len(cfg.HTTPTLSConfig.TLSKey) > 0) { // Note: ConfigToTLSConfig from prometheus/exporter-toolkit is awaiting security review. httpTLSConfig, err = web.ConfigToTLSConfig(&web.TLSConfig{ - TLSCertPath: cfg.HTTPTLSConfig.TLSCertPath, - TLSKeyPath: cfg.HTTPTLSConfig.TLSKeyPath, - ClientAuth: cfg.HTTPTLSConfig.ClientAuth, - ClientCAs: cfg.HTTPTLSConfig.ClientCAs, - CipherSuites: cipherSuites, - MinVersion: minVersion, + TLSCert: cfg.HTTPTLSConfig.TLSCert, + TLSKey: config.Secret(cfg.HTTPTLSConfig.TLSKey), + ClientCAsText: cfg.HTTPTLSConfig.ClientCAsText, + TLSCertPath: cfg.HTTPTLSConfig.TLSCertPath, + TLSKeyPath: cfg.HTTPTLSConfig.TLSKeyPath, + ClientAuth: cfg.HTTPTLSConfig.ClientAuth, + ClientCAs: cfg.HTTPTLSConfig.ClientCAs, + CipherSuites: cipherSuites, + MinVersion: minVersion, }) if err != nil { return nil, fmt.Errorf("error generating http tls config: %v", err) } } var grpcTLSConfig *tls.Config - if len(cfg.GRPCTLSConfig.TLSCertPath) > 0 && len(cfg.GRPCTLSConfig.TLSKeyPath) > 0 { + if (len(cfg.GRPCTLSConfig.TLSCertPath) > 0 || len(cfg.GRPCTLSConfig.TLSCert) > 0) && + (len(cfg.GRPCTLSConfig.TLSKeyPath) > 0 || len(cfg.GRPCTLSConfig.TLSKey) > 0) { // Note: ConfigToTLSConfig from prometheus/exporter-toolkit is awaiting security review. 
grpcTLSConfig, err = web.ConfigToTLSConfig(&web.TLSConfig{ - TLSCertPath: cfg.GRPCTLSConfig.TLSCertPath, - TLSKeyPath: cfg.GRPCTLSConfig.TLSKeyPath, - ClientAuth: cfg.GRPCTLSConfig.ClientAuth, - ClientCAs: cfg.GRPCTLSConfig.ClientCAs, - CipherSuites: cipherSuites, - MinVersion: minVersion, + TLSCert: cfg.GRPCTLSConfig.TLSCert, + TLSKey: config.Secret(cfg.GRPCTLSConfig.TLSKey), + ClientCAsText: cfg.GRPCTLSConfig.ClientCAsText, + TLSCertPath: cfg.GRPCTLSConfig.TLSCertPath, + TLSKeyPath: cfg.GRPCTLSConfig.TLSKeyPath, + ClientAuth: cfg.GRPCTLSConfig.ClientAuth, + ClientCAs: cfg.GRPCTLSConfig.ClientCAs, + CipherSuites: cipherSuites, + MinVersion: minVersion, }) if err != nil { return nil, fmt.Errorf("error generating grpc tls config: %v", err) } } - // Prometheus histograms for requests. - requestDuration := prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: cfg.MetricsNamespace, - Name: "request_duration_seconds", - Help: "Time (in seconds) spent serving HTTP requests.", - Buckets: instrument.DefBuckets, - NativeHistogramBucketFactor: cfg.MetricsNativeHistogramFactor, - NativeHistogramMaxBucketNumber: 100, - NativeHistogramMinResetDuration: time.Hour, - }, []string{"method", "route", "status_code", "ws"}) - reg.MustRegister(requestDuration) - - receivedMessageSize := prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: cfg.MetricsNamespace, - Name: "request_message_bytes", - Help: "Size (in bytes) of messages received in the request.", - Buckets: middleware.BodySizeBuckets, - }, []string{"method", "route"}) - reg.MustRegister(receivedMessageSize) - - sentMessageSize := prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: cfg.MetricsNamespace, - Name: "response_message_bytes", - Help: "Size (in bytes) of messages sent in response.", - Buckets: middleware.BodySizeBuckets, - }, []string{"method", "route"}) - reg.MustRegister(sentMessageSize) - - inflightRequests := prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: cfg.MetricsNamespace, - Name: "inflight_requests", - Help: "Current number of inflight requests.", - }, []string{"method", "route"}) - reg.MustRegister(inflightRequests) - - log.WithField("http", httpListener.Addr()).WithField("grpc", grpcListener.Addr()).Infof("server listening on addresses") + level.Info(logger).Log("msg", "server listening on addresses", "http", httpListener.Addr(), "grpc", grpcListener.Addr()) // Setup gRPC server serverLog := middleware.GRPCServerLog{ - Log: log, + Log: logger, WithRequest: !cfg.ExcludeRequestInLog, DisableRequestSuccessLog: cfg.DisableRequestSuccessLog, } + var reportGRPCStatusesOptions []middleware.InstrumentationOption + if cfg.ReportGRPCCodesInInstrumentationLabel { + reportGRPCStatusesOptions = []middleware.InstrumentationOption{middleware.ReportGRPCStatusOption} + } grpcMiddleware := []grpc.UnaryServerInterceptor{ serverLog.UnaryServerInterceptor, otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer()), - middleware.UnaryServerInstrumentInterceptor(requestDuration), + middleware.UnaryServerInstrumentInterceptor(metrics.RequestDuration, reportGRPCStatusesOptions...), } grpcMiddleware = append(grpcMiddleware, cfg.GRPCMiddleware...) 
grpcStreamMiddleware := []grpc.StreamServerInterceptor{ serverLog.StreamServerInterceptor, otgrpc.OpenTracingStreamServerInterceptor(opentracing.GlobalTracer()), - middleware.StreamServerInstrumentInterceptor(requestDuration), + middleware.StreamServerInstrumentInterceptor(metrics.RequestDuration, reportGRPCStatusesOptions...), } grpcStreamMiddleware = append(grpcStreamMiddleware, cfg.GRPCStreamMiddleware...) @@ -371,17 +392,32 @@ func New(cfg Config) (*Server, error) { grpc.ChainStreamInterceptor(grpcStreamMiddleware...), grpc.KeepaliveParams(grpcKeepAliveOptions), grpc.KeepaliveEnforcementPolicy(grpcKeepAliveEnforcementPolicy), - grpc.MaxRecvMsgSize(cfg.GPRCServerMaxRecvMsgSize), + grpc.MaxRecvMsgSize(cfg.GRPCServerMaxRecvMsgSize), grpc.MaxSendMsgSize(cfg.GRPCServerMaxSendMsgSize), - grpc.MaxConcurrentStreams(uint32(cfg.GPRCServerMaxConcurrentStreams)), - grpc.StatsHandler(middleware.NewStatsHandler(receivedMessageSize, sentMessageSize, inflightRequests)), + grpc.MaxConcurrentStreams(uint32(cfg.GRPCServerMaxConcurrentStreams)), + grpc.NumStreamWorkers(uint32(cfg.GRPCServerNumWorkers)), + } + + if cfg.GrpcMethodLimiter != nil { + grpcServerLimit := newGrpcInflightLimitCheck(cfg.GrpcMethodLimiter) + grpcOptions = append(grpcOptions, grpc.InTapHandle(grpcServerLimit.TapHandle), grpc.StatsHandler(grpcServerLimit)) } + + grpcOptions = append(grpcOptions, + grpc.StatsHandler(middleware.NewStatsHandler( + metrics.ReceivedMessageSize, + metrics.SentMessageSize, + metrics.InflightRequests, + )), + ) + grpcOptions = append(grpcOptions, cfg.GRPCOptions...) if grpcTLSConfig != nil { grpcCreds := credentials.NewTLS(grpcTLSConfig) grpcOptions = append(grpcOptions, grpc.Creds(grpcCreds)) } grpcServer := grpc.NewServer(grpcOptions...) + grpcOnHTTPServer := grpc.NewServer(grpcOptions...) // Setup HTTP server var router *mux.Router @@ -399,29 +435,35 @@ func New(cfg Config) (*Server, error) { RegisterInstrumentationWithGatherer(router, gatherer) } - var sourceIPs *middleware.SourceIPExtractor - if cfg.LogSourceIPs { - sourceIPs, err = middleware.NewSourceIPs(cfg.LogSourceIPsHeader, cfg.LogSourceIPsRegex) - if err != nil { - return nil, fmt.Errorf("error setting up source IP extraction: %v", err) - } + sourceIPs, err := middleware.NewSourceIPs(cfg.LogSourceIPsHeader, cfg.LogSourceIPsRegex) + if err != nil { + return nil, fmt.Errorf("error setting up source IP extraction: %v", err) + } + logSourceIPs := sourceIPs + if !cfg.LogSourceIPs { + // We always include the source IPs for traces, + // but only want to log them in the middleware if that is enabled. 
+ logSourceIPs = nil } + defaultLogMiddleware := middleware.NewLogMiddleware(logger, cfg.LogRequestHeaders, cfg.LogRequestAtInfoLevel, logSourceIPs, strings.Split(cfg.LogRequestExcludeHeadersList, ",")) + defaultLogMiddleware.DisableRequestSuccessLog = cfg.DisableRequestSuccessLog + defaultHTTPMiddleware := []middleware.Interface{ middleware.Tracer{ RouteMatcher: router, SourceIPs: sourceIPs, }, - middleware.NewLogMiddleware(log, cfg.LogRequestHeaders, cfg.LogRequestAtInfoLevel, sourceIPs, strings.Split(cfg.LogRequestExcludeHeadersList, ",")), + defaultLogMiddleware, middleware.Instrument{ RouteMatcher: router, - Duration: requestDuration, - RequestBodySize: receivedMessageSize, - ResponseBodySize: sentMessageSize, - InflightRequests: inflightRequests, + Duration: metrics.RequestDuration, + RequestBodySize: metrics.ReceivedMessageSize, + ResponseBodySize: metrics.SentMessageSize, + InflightRequests: metrics.InflightRequests, }, } - httpMiddleware := []middleware.Interface{} + var httpMiddleware []middleware.Interface if cfg.DoNotAddDefaultHTTPMiddleware { httpMiddleware = cfg.HTTPMiddleware } else { @@ -429,10 +471,11 @@ func New(cfg Config) (*Server, error) { } httpServer := &http.Server{ - ReadTimeout: cfg.HTTPServerReadTimeout, - WriteTimeout: cfg.HTTPServerWriteTimeout, - IdleTimeout: cfg.HTTPServerIdleTimeout, - Handler: middleware.Merge(httpMiddleware...).Wrap(router), + ReadTimeout: cfg.HTTPServerReadTimeout, + ReadHeaderTimeout: cfg.HTTPServerReadHeaderTimeout, + WriteTimeout: cfg.HTTPServerWriteTimeout, + IdleTimeout: cfg.HTTPServerIdleTimeout, + Handler: middleware.Merge(httpMiddleware...).Wrap(router), } if httpTLSConfig != nil { httpServer.TLSConfig = httpTLSConfig @@ -440,21 +483,24 @@ func New(cfg Config) (*Server, error) { handler := cfg.SignalHandler if handler == nil { - handler = signals.NewHandler(log) + handler = signals.NewHandler(logger) } return &Server{ - cfg: cfg, - httpListener: httpListener, - grpcListener: grpcListener, - handler: handler, - - HTTP: router, - HTTPServer: httpServer, - GRPC: grpcServer, - Log: log, - Registerer: reg, - Gatherer: gatherer, + cfg: cfg, + httpListener: httpListener, + grpcListener: grpcListener, + grpcOnHTTPListener: grpcOnHTTPListener, + handler: handler, + grpchttpmux: grpchttpmux, + + HTTP: router, + HTTPServer: httpServer, + GRPC: grpcServer, + GRPCOnHTTPServer: grpcOnHTTPServer, + Log: logger, + Registerer: cfg.registererOrDefault(), + Gatherer: gatherer, }, nil } @@ -507,19 +553,37 @@ func (s *Server) Run() error { go func() { err := s.GRPC.Serve(s.grpcListener) - if err == grpc.ErrServerStopped { - err = nil - } - - select { - case errChan <- err: - default: - } + handleGRPCError(err, errChan) }() + // grpchttpmux will only be set if grpchttpmux RouteHTTPToGRPC is set + if s.grpchttpmux != nil { + go func() { + err := s.grpchttpmux.Serve() + handleGRPCError(err, errChan) + }() + go func() { + err := s.GRPCOnHTTPServer.Serve(s.grpcOnHTTPListener) + handleGRPCError(err, errChan) + }() + } + return <-errChan } +// handleGRPCError consolidates GRPC Server error handling by sending +// any error to errChan except for grpc.ErrServerStopped which is ignored. +func handleGRPCError(err error, errChan chan error) { + if err == grpc.ErrServerStopped { + err = nil + } + + select { + case errChan <- err: + default: + } +} + // HTTPListenAddr exposes `net.Addr` that `Server` is listening to for HTTP connections. 
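The extracted `handleGRPCError` helper encodes two decisions worth noting: `grpc.ErrServerStopped` is treated as a clean shutdown rather than a failure, and the non-blocking send means only the first serve result is recorded once several serve goroutines are running. A self-contained sketch of the same pattern (the names are mine, not the package's):

```go
package main

import (
	"errors"

	"google.golang.org/grpc"
)

// reportServeErr mirrors handleGRPCError: a graceful stop is not an
// error, and the non-blocking send drops results once one has been
// recorded, so late goroutines never hang on the channel.
func reportServeErr(err error, errChan chan error) {
	if errors.Is(err, grpc.ErrServerStopped) {
		err = nil
	}
	select {
	case errChan <- err: // first writer wins
	default: // a result is already buffered; drop this one
	}
}

func main() {
	errChan := make(chan error, 1)
	reportServeErr(grpc.ErrServerStopped, errChan) // recorded as nil
	reportServeErr(errors.New("late failure"), errChan) // dropped
	if err := <-errChan; err != nil {
		panic(err)
	}
}
```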
func (s *Server) HTTPListenAddr() net.Addr { return s.httpListener.Addr() @@ -541,6 +605,6 @@ func (s *Server) Shutdown() { ctx, cancel := context.WithTimeout(context.Background(), s.cfg.ServerGracefulShutdownTimeout) defer cancel() // releases resources if httpServer.Shutdown completes before timeout elapses - s.HTTPServer.Shutdown(ctx) + _ = s.HTTPServer.Shutdown(ctx) s.GRPC.GracefulStop() } diff --git a/vendor/github.com/weaveworks/common/server/tls_config.go b/vendor/github.com/grafana/dskit/server/tls_config.go similarity index 87% rename from vendor/github.com/weaveworks/common/server/tls_config.go rename to vendor/github.com/grafana/dskit/server/tls_config.go index b374a67c..128c6d04 100644 --- a/vendor/github.com/weaveworks/common/server/tls_config.go +++ b/vendor/github.com/grafana/dskit/server/tls_config.go @@ -1,3 +1,7 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/server/tls_config.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + package server import ( diff --git a/vendor/github.com/grafana/dskit/services/basic_service.go b/vendor/github.com/grafana/dskit/services/basic_service.go index 6ced33aa..185a4a10 100644 --- a/vendor/github.com/grafana/dskit/services/basic_service.go +++ b/vendor/github.com/grafana/dskit/services/basic_service.go @@ -255,20 +255,20 @@ func (b *BasicService) StopAsync() { // // Example: // -// func (s *exampleService) Send(msg string) bool { -// ctx := s.ServiceContext() -// if ctx == nil { -// // Service is not yet started -// return false -// } -// select { -// case s.ch <- msg: -// return true -// case <-ctx.Done(): -// // Service is not running anymore. -// return false -// } +// func (s *exampleService) Send(msg string) bool { +// ctx := s.ServiceContext() +// if ctx == nil { +// // Service is not yet started +// return false // } +// select { +// case s.ch <- msg: +// return true +// case <-ctx.Done(): +// // Service is not running anymore. +// return false +// } +// } // // This is not part of Service interface, and clients of the Service should not use it. 
func (b *BasicService) ServiceContext() context.Context { diff --git a/vendor/github.com/grafana/dskit/services/manager.go b/vendor/github.com/grafana/dskit/services/manager.go index 3ef6aad4..da5ebf73 100644 --- a/vendor/github.com/grafana/dskit/services/manager.go +++ b/vendor/github.com/grafana/dskit/services/manager.go @@ -284,7 +284,7 @@ func (l managerServiceListener) Terminated(from State) { l.m.serviceStateChanged(l.s, from, Terminated) } -func (l managerServiceListener) Failed(from State, failure error) { +func (l managerServiceListener) Failed(from State, _ error) { l.m.serviceStateChanged(l.s, from, Failed) } diff --git a/vendor/github.com/grafana/dskit/services/service.go b/vendor/github.com/grafana/dskit/services/service.go index 6170951a..e3de5a09 100644 --- a/vendor/github.com/grafana/dskit/services/service.go +++ b/vendor/github.com/grafana/dskit/services/service.go @@ -41,18 +41,17 @@ func (s State) String() string { // // State diagram for the service: // -// ┌────────────────────────────────────────────────────────────────────┐ -// │ │ -// │ ▼ -// ┌─────┐ ┌──────────┐ ┌─────────┐ ┌──────────┐ ┌────────────┐ -// │ New │─────▶│ Starting │─────▶│ Running │────▶│ Stopping │───┬─▶│ Terminated │ -// └─────┘ └──────────┘ └─────────┘ └──────────┘ │ └────────────┘ -// │ │ -// │ │ -// │ │ ┌────────┐ -// └──────────────────────────────────────────┴──▶│ Failed │ -// └────────┘ -// +// ┌────────────────────────────────────────────────────────────────────┐ +// │ │ +// │ ▼ +// ┌─────┐ ┌──────────┐ ┌─────────┐ ┌──────────┐ ┌────────────┐ +// │ New │─────▶│ Starting │─────▶│ Running │────▶│ Stopping │───┬─▶│ Terminated │ +// └─────┘ └──────────┘ └─────────┘ └──────────┘ │ └────────────┘ +// │ │ +// │ │ +// │ │ ┌────────┐ +// └──────────────────────────────────────────┴──▶│ Failed │ +// └────────┘ type Service interface { // StartAsync starts Service asynchronously. Service must be in New State, otherwise error is returned. // Context is used as a parent context for service own context. diff --git a/vendor/github.com/weaveworks/common/signals/signals.go b/vendor/github.com/grafana/dskit/signals/signals.go similarity index 63% rename from vendor/github.com/weaveworks/common/signals/signals.go rename to vendor/github.com/grafana/dskit/signals/signals.go index 29d45fc8..ff642315 100644 --- a/vendor/github.com/weaveworks/common/signals/signals.go +++ b/vendor/github.com/grafana/dskit/signals/signals.go @@ -1,3 +1,7 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/signals/signals.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + package signals import ( @@ -6,7 +10,10 @@ import ( "runtime" "syscall" - "github.com/weaveworks/common/logging" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + + dskit_log "github.com/grafana/dskit/log" ) // SignalReceiver represents a subsystem/server/... that can be stopped or @@ -19,13 +26,13 @@ type SignalReceiver interface { // On SIGINT or SIGTERM it will exit, on SIGQUIT it // will dump goroutine stacks to the Logger. type Handler struct { - log logging.Interface + log log.Logger receivers []SignalReceiver quit chan struct{} } // NewHandler makes a new Handler. 
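The service state machine shown above is easiest to follow with a toy `BasicService`. A hedged sketch using this package's constructors, assuming the usual dskit helpers `StartAndAwaitRunning` and `StopAndAwaitTerminated`:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/grafana/dskit/services"
)

func main() {
	run := func(ctx context.Context) error {
		<-ctx.Done() // stay Running until StopAsync cancels the service context
		return nil
	}
	// nil starting/stopping functions are allowed and skipped.
	svc := services.NewBasicService(nil, run, nil)

	ctx := context.Background()
	_ = services.StartAndAwaitRunning(ctx, svc) // New -> Starting -> Running
	fmt.Println(svc.State())                    // Running

	time.Sleep(10 * time.Millisecond)
	svc.StopAsync() // Running -> Stopping
	_ = services.StopAndAwaitTerminated(ctx, svc)
	fmt.Println(svc.State()) // Terminated
}
```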
-func NewHandler(log logging.Interface, receivers ...SignalReceiver) *Handler { +func NewHandler(log log.Logger, receivers ...SignalReceiver) *Handler { return &Handler{ log: log, receivers: receivers, @@ -47,19 +54,19 @@ func (h *Handler) Loop() { for { select { case <-h.quit: - h.log.Infof("=== Handler.Stop()'d ===") + level.Info(h.log).Log("msg", "=== Handler.Stop()'d ===") return case sig := <-sigs: switch sig { case syscall.SIGINT, syscall.SIGTERM: - h.log.Infof("=== received SIGINT/SIGTERM ===\n*** exiting") + level.Info(h.log).Log("msg", "=== received SIGINT/SIGTERM ===\n*** exiting") for _, subsystem := range h.receivers { - subsystem.Stop() + _ = subsystem.Stop() } return case syscall.SIGQUIT: stacklen := runtime.Stack(buf, true) - h.log.Infof("=== received SIGQUIT ===\n*** goroutine dump...\n%s\n*** end", buf[:stacklen]) + level.Info(h.log).Log("msg", dskit_log.LazySprintf("=== received SIGQUIT ===\n*** goroutine dump...\n%s\n*** end", buf[:stacklen])) } } } @@ -68,6 +75,6 @@ func (h *Handler) Loop() { // SignalHandlerLoop blocks until it receives a SIGINT, SIGTERM or SIGQUIT. // For SIGINT and SIGTERM, it exits; for SIGQUIT is print a goroutine stack // dump. -func SignalHandlerLoop(log logging.Interface, ss ...SignalReceiver) { +func SignalHandlerLoop(log log.Logger, ss ...SignalReceiver) { NewHandler(log, ss...).Loop() } diff --git a/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go b/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go new file mode 100644 index 00000000..08653eda --- /dev/null +++ b/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go @@ -0,0 +1,169 @@ +package spanlogger + +import ( + "context" + + "go.uber.org/atomic" // Really just need sync/atomic but there is a lint rule preventing it. + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + otlog "github.com/opentracing/opentracing-go/log" + + "github.com/grafana/dskit/tracing" +) + +type loggerCtxMarker struct{} + +// TenantResolver provides methods for extracting tenant IDs from a context. +type TenantResolver interface { + // TenantID tries to extract a tenant ID from a context. + TenantID(context.Context) (string, error) + // TenantIDs tries to extract tenant IDs from a context. + TenantIDs(context.Context) ([]string, error) +} + +const ( + // TenantIDsTagName is the tenant IDs tag name. + TenantIDsTagName = "tenant_ids" +) + +var ( + loggerCtxKey = &loggerCtxMarker{} +) + +// SpanLogger unifies tracing and logging, to reduce repetition. +type SpanLogger struct { + ctx context.Context // context passed in, with logger + resolver TenantResolver // passed in + baseLogger log.Logger // passed in + logger atomic.Pointer[log.Logger] // initialized on first use + opentracing.Span + sampled bool + debugEnabled bool +} + +// New makes a new SpanLogger with a log.Logger to send logs to. The provided context will have the logger attached +// to it and can be retrieved with FromContext. 
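With the move to go-kit logging, `SignalHandlerLoop` now takes a `log.Logger` directly. A minimal usage sketch; the `stopper` type is a stand-in for anything implementing `SignalReceiver`:

```go
package main

import (
	"os"

	"github.com/go-kit/log"

	"github.com/grafana/dskit/signals"
)

// stopper is a placeholder SignalReceiver; Stop is called on SIGINT/SIGTERM.
type stopper struct{}

func (stopper) Stop() error { return nil }

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)
	// Blocks until SIGINT/SIGTERM (stops the receivers, then returns);
	// on SIGQUIT it dumps goroutine stacks and keeps looping.
	signals.SignalHandlerLoop(logger, stopper{})
}
```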
+func New(ctx context.Context, logger log.Logger, method string, resolver TenantResolver, kvps ...interface{}) (*SpanLogger, context.Context) { + span, ctx := opentracing.StartSpanFromContext(ctx, method) + if ids, err := resolver.TenantIDs(ctx); err == nil && len(ids) > 0 { + span.SetTag(TenantIDsTagName, ids) + } + _, sampled := tracing.ExtractSampledTraceID(ctx) + l := &SpanLogger{ + ctx: ctx, + resolver: resolver, + baseLogger: log.With(logger, "method", method), + Span: span, + sampled: sampled, + debugEnabled: debugEnabled(logger), + } + if len(kvps) > 0 { + l.DebugLog(kvps...) + } + + ctx = context.WithValue(ctx, loggerCtxKey, logger) + return l, ctx +} + +// FromContext returns a span logger using the current parent span. +// If there is no parent span, the SpanLogger will only log to the logger +// within the context. If the context doesn't have a logger, the fallback +// logger is used. +func FromContext(ctx context.Context, fallback log.Logger, resolver TenantResolver) *SpanLogger { + logger, ok := ctx.Value(loggerCtxKey).(log.Logger) + if !ok { + logger = fallback + } + sampled := false + sp := opentracing.SpanFromContext(ctx) + if sp == nil { + sp = opentracing.NoopTracer{}.StartSpan("noop") + } else { + _, sampled = tracing.ExtractSampledTraceID(ctx) + } + return &SpanLogger{ + ctx: ctx, + baseLogger: logger, + resolver: resolver, + Span: sp, + sampled: sampled, + debugEnabled: debugEnabled(logger), + } +} + +// Detect whether we should output debug logging. +// false iff the logger says it's not enabled; true if the logger doesn't say. +func debugEnabled(logger log.Logger) bool { + if x, ok := logger.(interface{ DebugEnabled() bool }); ok && !x.DebugEnabled() { + return false + } + return true +} + +// Log implements gokit's Logger interface; sends logs to underlying logger and +// also puts the on the spans. +func (s *SpanLogger) Log(kvps ...interface{}) error { + s.getLogger().Log(kvps...) + return s.spanLog(kvps...) +} + +// DebugLog is more efficient than level.Debug().Log(). +// Also it swallows the error return because nobody checks for errors on debug logs. +func (s *SpanLogger) DebugLog(kvps ...interface{}) { + if s.debugEnabled { + // The call to Log() through an interface makes its argument escape, so make a copy here, + // in the debug-only path, so the function is faster for the non-debug path. + localCopy := append([]any{}, kvps...) + level.Debug(s.getLogger()).Log(localCopy...) + } + _ = s.spanLog(kvps...) +} + +func (s *SpanLogger) spanLog(kvps ...interface{}) error { + if !s.sampled { + return nil + } + fields, err := otlog.InterleavedKVToFields(kvps...) + if err != nil { + return err + } + s.Span.LogFields(fields...) + return nil +} + +// Error sets error flag and logs the error on the span, if non-nil. Returns the err passed in. +func (s *SpanLogger) Error(err error) error { + if err == nil || !s.sampled { + return err + } + ext.Error.Set(s.Span, true) + s.Span.LogFields(otlog.Error(err)) + return err +} + +func (s *SpanLogger) getLogger() log.Logger { + pLogger := s.logger.Load() + if pLogger != nil { + return *pLogger + } + // If no logger stored in the pointer, start to make one. + logger := s.baseLogger + userID, err := s.resolver.TenantID(s.ctx) + if err == nil && userID != "" { + logger = log.With(logger, "user", userID) + } + + traceID, ok := tracing.ExtractSampledTraceID(s.ctx) + if ok { + logger = log.With(logger, "traceID", traceID) + } + // If the value has been set by another goroutine, fetch that other value and discard the one we made. 
+ if !s.logger.CompareAndSwap(nil, &logger) { + pLogger := s.logger.Load() + logger = *pLogger + } + return logger +} diff --git a/vendor/github.com/grafana/dskit/tenant/resolver.go b/vendor/github.com/grafana/dskit/tenant/resolver.go index 607a290e..aa19d75b 100644 --- a/vendor/github.com/grafana/dskit/tenant/resolver.go +++ b/vendor/github.com/grafana/dskit/tenant/resolver.go @@ -6,7 +6,7 @@ import ( "net/http" "strings" - "github.com/weaveworks/common/user" + "github.com/grafana/dskit/user" ) var defaultResolver Resolver = NewSingleResolver() @@ -22,6 +22,7 @@ func WithDefaultResolver(r Resolver) { // supplied or user.ErrTooManyOrgIDs if there are multiple tenant IDs present. // // ignore stutter warning +// //nolint:revive func TenantID(ctx context.Context) (string, error) { return defaultResolver.TenantID(ctx) @@ -32,6 +33,7 @@ func TenantID(ctx context.Context) (string, error) { // NormalizeTenantIDs). // // ignore stutter warning +// //nolint:revive func TenantIDs(ctx context.Context) ([]string, error) { return defaultResolver.TenantIDs(ctx) diff --git a/vendor/github.com/grafana/dskit/tenant/tenant.go b/vendor/github.com/grafana/dskit/tenant/tenant.go index 99c1cc4a..a5807500 100644 --- a/vendor/github.com/grafana/dskit/tenant/tenant.go +++ b/vendor/github.com/grafana/dskit/tenant/tenant.go @@ -7,7 +7,7 @@ import ( "sort" "strings" - "github.com/weaveworks/common/user" + "github.com/grafana/dskit/user" ) var ( @@ -99,6 +99,7 @@ func isSupported(c rune) bool { // TenantIDsFromOrgID extracts different tenants from an orgID string value // // ignore stutter warning +// //nolint:revive func TenantIDsFromOrgID(orgID string) ([]string, error) { return TenantIDs(user.InjectOrgID(context.TODO(), orgID)) diff --git a/vendor/github.com/weaveworks/common/tracing/tracing.go b/vendor/github.com/grafana/dskit/tracing/tracing.go similarity index 92% rename from vendor/github.com/weaveworks/common/tracing/tracing.go rename to vendor/github.com/grafana/dskit/tracing/tracing.go index 55fdfde9..66b3a3ce 100644 --- a/vendor/github.com/weaveworks/common/tracing/tracing.go +++ b/vendor/github.com/grafana/dskit/tracing/tracing.go @@ -1,3 +1,7 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/tracing/tracing.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + package tracing import ( diff --git a/vendor/github.com/weaveworks/common/user/grpc.go b/vendor/github.com/grafana/dskit/user/grpc.go similarity index 85% rename from vendor/github.com/weaveworks/common/user/grpc.go rename to vendor/github.com/grafana/dskit/user/grpc.go index 64f0491a..201b835e 100644 --- a/vendor/github.com/weaveworks/common/user/grpc.go +++ b/vendor/github.com/grafana/dskit/user/grpc.go @@ -1,7 +1,12 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/user/grpc.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. 
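Putting the new `spanlogger` pieces together: `New` starts a span, tags tenant IDs, and attaches the logger to the returned context, while `FromContext` recovers it downstream. A hedged usage sketch; the method name and key/values are illustrative, and `tenant.NewSingleResolver` (the default resolver shown later in this diff) stands in for whatever resolver a real caller wires up:

```go
package main

import (
	"context"
	"os"

	"github.com/go-kit/log"

	"github.com/grafana/dskit/spanlogger"
	"github.com/grafana/dskit/tenant"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)
	ctx := context.Background()

	// One call starts a span named after the method, tags tenant IDs,
	// and returns a logger that writes to both the log and the span.
	sl, ctx := spanlogger.New(ctx, logger, "Store.Query", tenant.NewSingleResolver(), "shard", 3)
	defer sl.Finish() // finish the embedded opentracing span

	sl.Log("msg", "query executed", "rows", 42)

	// Downstream code recovers the logger from the context.
	_ = spanlogger.FromContext(ctx, logger, tenant.NewSingleResolver())
}
```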
+ package user import ( - "golang.org/x/net/context" + "context" + "google.golang.org/grpc/metadata" ) diff --git a/vendor/github.com/weaveworks/common/user/http.go b/vendor/github.com/grafana/dskit/user/http.go similarity index 91% rename from vendor/github.com/weaveworks/common/user/http.go rename to vendor/github.com/grafana/dskit/user/http.go index c15fd410..ca015b36 100644 --- a/vendor/github.com/weaveworks/common/user/http.go +++ b/vendor/github.com/grafana/dskit/user/http.go @@ -1,9 +1,12 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/user/http.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + package user import ( + "context" "net/http" - - "golang.org/x/net/context" ) const ( diff --git a/vendor/github.com/weaveworks/common/user/id.go b/vendor/github.com/grafana/dskit/user/id.go similarity index 67% rename from vendor/github.com/weaveworks/common/user/id.go rename to vendor/github.com/grafana/dskit/user/id.go index a1f91f0c..bafecdce 100644 --- a/vendor/github.com/weaveworks/common/user/id.go +++ b/vendor/github.com/grafana/dskit/user/id.go @@ -1,9 +1,12 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/user/id.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + package user import ( - "golang.org/x/net/context" - - "github.com/weaveworks/common/errors" + "context" + "errors" ) type contextKey int @@ -15,14 +18,13 @@ const ( ) // Errors that we return -const ( - ErrNoOrgID = errors.Error("no org id") - ErrDifferentOrgIDPresent = errors.Error("different org ID already present") - ErrTooManyOrgIDs = errors.Error("multiple org IDs present") +var ( + ErrNoOrgID = errors.New("no org id") + ErrDifferentOrgIDPresent = errors.New("different org ID already present") + ErrTooManyOrgIDs = errors.New("multiple org IDs present") - ErrNoUserID = errors.Error("no user id") - ErrDifferentUserIDPresent = errors.Error("different user ID already present") - ErrTooManyUserIDs = errors.Error("multiple user IDs present") + ErrNoUserID = errors.New("no user id") + ErrDifferentUserIDPresent = errors.New("different user ID already present") ) // ExtractOrgID gets the org ID from the context. diff --git a/vendor/github.com/grafana/dskit/user/logging.go b/vendor/github.com/grafana/dskit/user/logging.go new file mode 100644 index 00000000..a7d89b53 --- /dev/null +++ b/vendor/github.com/grafana/dskit/user/logging.go @@ -0,0 +1,26 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/user/logging.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + +package user + +import ( + "context" + + "github.com/go-kit/log" +) + +// LogWith returns user and org information from the context as log fields. 
+func LogWith(ctx context.Context, logger log.Logger) log.Logger { + userID, err := ExtractUserID(ctx) + if err == nil { + logger = log.With(logger, "userID", userID) + } + + orgID, err := ExtractOrgID(ctx) + if err == nil { + logger = log.With(logger, "orgID", orgID) + } + + return logger +} diff --git a/vendor/github.com/weaveworks/promrus/LICENSE b/vendor/github.com/grafana/gomemcache/LICENSE similarity index 99% rename from vendor/github.com/weaveworks/promrus/LICENSE rename to vendor/github.com/grafana/gomemcache/LICENSE index 8dada3ed..d6456956 100644 --- a/vendor/github.com/weaveworks/promrus/LICENSE +++ b/vendor/github.com/grafana/gomemcache/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -178,7 +179,7 @@ APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -186,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/grafana/gomemcache/memcache/memcache.go b/vendor/github.com/grafana/gomemcache/memcache/memcache.go new file mode 100644 index 00000000..67288a12 --- /dev/null +++ b/vendor/github.com/grafana/gomemcache/memcache/memcache.go @@ -0,0 +1,913 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package memcache provides a client for the memcached cache server. +package memcache + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "math" + "net" + "strconv" + "strings" + "sync" + "time" +) + +// Similar to: +// https://godoc.org/google.golang.org/appengine/memcache + +var ( + // ErrCacheMiss means that a Get failed because the item wasn't present. + ErrCacheMiss = errors.New("memcache: cache miss") + + // ErrCASConflict means that a CompareAndSwap call failed due to the + // cached value being modified between the Get and the CompareAndSwap. + // If the cached value was simply evicted rather than replaced, + // ErrNotStored will be returned instead. + ErrCASConflict = errors.New("memcache: compare-and-swap conflict") + + // ErrNotStored means that a conditional write operation (i.e. Add or + // CompareAndSwap) failed because the condition was not satisfied. + ErrNotStored = errors.New("memcache: item not stored") + + // ErrServerError means that a server error occurred. + ErrServerError = errors.New("memcache: server error") + + // ErrNoStats means that no statistics were available. 
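`LogWith` pairs naturally with the org/user injection helpers in the same package. A small sketch, using `InjectOrgID` just as this diff's tenant package does:

```go
package main

import (
	"context"
	"os"

	"github.com/go-kit/log"

	"github.com/grafana/dskit/user"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	ctx := user.InjectOrgID(context.Background(), "team-a")

	// LogWith silently skips whichever of userID/orgID is absent.
	logger = user.LogWith(ctx, logger)
	_ = logger.Log("msg", "handling request") // ... orgID=team-a msg="handling request"
}
```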
+ ErrNoStats = errors.New("memcache: no statistics available") + + // ErrMalformedKey is returned when an invalid key is used. + // Keys must be at maximum 250 bytes long and not + // contain whitespace or control characters. + ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters") + + // ErrNoServers is returned when no servers are configured or available. + ErrNoServers = errors.New("memcache: no servers configured or available") +) + +const ( + // DefaultTimeout is the default socket read/write timeout. + DefaultTimeout = 100 * time.Millisecond + + // DefaultMaxIdleConns is the default maximum number of idle connections + // kept for any single address. + DefaultMaxIdleConns = 2 + + // releaseIdleConnsCheckFrequency is how frequently to check if there are idle + // connections to release, in order to honor the configured min conns headroom. + releaseIdleConnsCheckFrequency = time.Minute + + // defaultRecentlyUsedConnsThreshold is the default grace period given to an + // idle connection to consider it "recently used". The default value has been + // set equal to the default TCP TIME_WAIT timeout in linux. + defaultRecentlyUsedConnsThreshold = 2 * time.Minute +) + +const buffered = 8 // arbitrary buffered channel size, for readability + +// resumableError returns true if err is only a protocol-level cache error. +// This is used to determine whether or not a server connection should +// be re-used or not. If an error occurs, by default we don't reuse the +// connection, unless it was just a cache error. +func resumableError(err error) bool { + switch err { + case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey: + return true + } + return false +} + +func legalKey(key string) bool { + if len(key) > 250 { + return false + } + for i := 0; i < len(key); i++ { + if key[i] <= ' ' || key[i] == 0x7f { + return false + } + } + return true +} + +var ( + crlf = []byte("\r\n") + resultOK = []byte("OK\r\n") + resultStored = []byte("STORED\r\n") + resultNotStored = []byte("NOT_STORED\r\n") + resultExists = []byte("EXISTS\r\n") + resultNotFound = []byte("NOT_FOUND\r\n") + resultDeleted = []byte("DELETED\r\n") + resultEnd = []byte("END\r\n") + resultOk = []byte("OK\r\n") + resultTouched = []byte("TOUCHED\r\n") + + resultClientErrorPrefix = []byte("CLIENT_ERROR ") + versionPrefix = []byte("VERSION") + valuePrefix = []byte("VALUE ") +) + +// New returns a memcache client using the provided server(s) +// with equal weight. If a server is listed multiple times, +// it gets a proportional amount of weight. +func New(server ...string) *Client { + ss := new(ServerList) + _ = ss.SetServers(server...) + return NewFromSelector(ss) +} + +// NewFromSelector returns a new Client using the provided ServerSelector. +func NewFromSelector(ss ServerSelector) *Client { + c := &Client{ + selector: ss, + closed: make(chan struct{}), + } + + go c.releaseIdleConnectionsUntilClosed() + + return c +} + +// Client is a memcache client. +// It is safe for unlocked use by multiple concurrent goroutines. +type Client struct { + // DialTimeout specifies a custom dialer used to dial new connections to a server. + DialTimeout func(network, address string, timeout time.Duration) (net.Conn, error) + + // Timeout specifies the socket read/write timeout. + // If zero, DefaultTimeout is used. + Timeout time.Duration + + // ConnectTimeout specifies the timeout for new connections. + // If zero, DefaultTimeout is used. 
+ ConnectTimeout time.Duration + + // MinIdleConnsHeadroomPercentage specifies the percentage of minimum number of idle connections + // that should be kept open, compared to the number of free but recently used connections. + // If there are idle connections but none of them has been recently used, then all idle + // connections get closed. + // + // If the configured value is negative, idle connections are never closed. + MinIdleConnsHeadroomPercentage float64 + + // MaxIdleConns specifies the maximum number of idle connections that will + // be maintained per address. If less than one, DefaultMaxIdleConns will be + // used. + // + // Consider your expected traffic rates and latency carefully. This should + // be set to a number higher than your peak parallel requests. + MaxIdleConns int + + // WriteBufferSizeBytes specifies the size of the write buffer (in bytes). The buffer + // is allocated for each connection. If <= 0, the default value of 4KB will be used. + WriteBufferSizeBytes int + + // ReadBufferSizeBytes specifies the size of the read buffer (in bytes). The buffer + // is allocated for each connection. If <= 0, the default value of 4KB will be used. + ReadBufferSizeBytes int + + // recentlyUsedConnsThreshold is the default grace period given to an + // idle connection to consider it "recently used". Recently used connections + // are never closed even if idle. + // + // This field is used for testing. + recentlyUsedConnsThreshold time.Duration + + // closed channel gets closed once the Client.Close() is called. Once closed, + // resources should be released. + closed chan struct{} + closeOnce sync.Once + + selector ServerSelector + + lk sync.Mutex + freeconn map[string][]*conn +} + +// Item is an item to be got or stored in a memcached server. +type Item struct { + // Key is the Item's key (250 bytes maximum). + Key string + + // Value is the Item's value. + Value []byte + + // Flags are server-opaque flags whose semantics are entirely + // up to the app. + Flags uint32 + + // Expiration is the cache expiration time, in seconds: either a relative + // time from now (up to 1 month), or an absolute Unix epoch time. + // Zero means the Item has no expiration time. + Expiration int32 + + // Compare and swap ID. + casid uint64 +} + +// conn is a connection to a server. +type conn struct { + nc net.Conn + rw *bufio.ReadWriter + addr net.Addr + c *Client + + // The timestamp since when this connection is idle. This is used to close + // idle connections. + idleSince time.Time +} + +// release returns this connection back to the client's free pool +func (cn *conn) release() { + cn.c.putFreeConn(cn.addr, cn) +} + +func (cn *conn) extendDeadline() { + _ = cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout())) +} + +// condRelease releases this connection if the error pointed to by err +// is nil (not an error) or is only a protocol level error (e.g. a +// cache miss). The purpose is to not recycle TCP connections that +// are bad. 
+func (cn *conn) condRelease(err *error) { + if *err == nil || resumableError(*err) { + cn.release() + } else { + cn.nc.Close() + } +} + +func (c *Client) putFreeConn(addr net.Addr, cn *conn) { + c.lk.Lock() + defer c.lk.Unlock() + if c.freeconn == nil { + c.freeconn = make(map[string][]*conn) + } + freelist := c.freeconn[addr.String()] + if len(freelist) >= c.maxIdleConns() { + cn.nc.Close() + return + } + + cn.idleSince = time.Now() + c.freeconn[addr.String()] = append(freelist, cn) +} + +func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) { + c.lk.Lock() + defer c.lk.Unlock() + if c.freeconn == nil { + return nil, false + } + freelist, ok := c.freeconn[addr.String()] + if !ok || len(freelist) == 0 { + return nil, false + } + + // Pop the connection from the end of the list. This way we prefer to always reuse + // the same connections, so that the min idle connections logic is effective. + cn = freelist[len(freelist)-1] + c.freeconn[addr.String()] = freelist[:len(freelist)-1] + return cn, true +} + +func (c *Client) netTimeout() time.Duration { + if c.Timeout != 0 { + return c.Timeout + } + return DefaultTimeout +} + +func (c *Client) connectTimeout() time.Duration { + if c.ConnectTimeout != 0 { + return c.ConnectTimeout + } + return DefaultTimeout +} + +func (c *Client) maxIdleConns() int { + if c.MaxIdleConns > 0 { + return c.MaxIdleConns + } + return DefaultMaxIdleConns +} + +func (c *Client) Close() { + c.closeOnce.Do(func() { + close(c.closed) + }) +} + +func (c *Client) releaseIdleConnectionsUntilClosed() { + for { + select { + case <-time.After(releaseIdleConnsCheckFrequency): + c.releaseIdleConnections() + case <-c.closed: + return + } + } +} + +func (c *Client) releaseIdleConnections() { + var toClose []io.Closer + + // Nothing to do if min idle connections headroom is disabled (negative value). + minIdleHeadroomPercentage := c.MinIdleConnsHeadroomPercentage + if minIdleHeadroomPercentage < 0 { + return + } + + // Get the recently used connections threshold, falling back to the default one. + recentlyUsedThreshold := c.recentlyUsedConnsThreshold + if recentlyUsedThreshold == 0 { + recentlyUsedThreshold = defaultRecentlyUsedConnsThreshold + } + + c.lk.Lock() + + for addr, freeConnections := range c.freeconn { + numIdle := 0 + + // Count the number of idle connections. Since the least used connections are at the beginning + // of the list, we can stop searching as soon as we find a non-idle connection. + for _, freeConn := range freeConnections { + if time.Since(freeConn.idleSince) < recentlyUsedThreshold { + break + } + + numIdle++ + } + + // Compute the number of connections to close. It keeps a number of idle connections equal to + // the configured headroom. + numRecentlyUsed := len(freeConnections) - numIdle + numIdleToKeep := int(math.Max(0, math.Ceil(float64(numRecentlyUsed)*minIdleHeadroomPercentage/100))) + numIdleToClose := numIdle - numIdleToKeep + if numIdleToClose <= 0 { + continue + } + + // Close idle connections. + for i := 0; i < numIdleToClose; i++ { + toClose = append(toClose, freeConnections[i].nc) + } + c.freeconn[addr] = c.freeconn[addr][numIdleToClose:] + } + + // Release the lock and then close the connections. + c.lk.Unlock() + + for _, freeConn := range toClose { + freeConn.Close() + } +} + +// ConnectTimeoutError is the error type used when it takes +// too long to connect to the desired host. This level of +// detail can generally be ignored. 
+type ConnectTimeoutError struct { + Addr net.Addr +} + +func (cte *ConnectTimeoutError) Error() string { + return "memcache: connect timeout to " + cte.Addr.String() +} + +func (c *Client) dial(addr net.Addr) (net.Conn, error) { + dialTimeout := c.DialTimeout + if dialTimeout == nil { + dialTimeout = net.DialTimeout + } + nc, err := dialTimeout(addr.Network(), addr.String(), c.connectTimeout()) + if err == nil { + return nc, nil + } + + if ne, ok := err.(net.Error); ok && ne.Timeout() { + return nil, &ConnectTimeoutError{addr} + } + + return nil, err +} + +func (c *Client) getConn(addr net.Addr) (*conn, error) { + var ( + writer *bufio.Writer + reader *bufio.Reader + ) + + cn, ok := c.getFreeConn(addr) + if ok { + cn.extendDeadline() + return cn, nil + } + nc, err := c.dial(addr) + if err != nil { + return nil, err + } + + // Init buffered writer. + if c.WriteBufferSizeBytes > 0 { + writer = bufio.NewWriterSize(nc, c.WriteBufferSizeBytes) + } else { + writer = bufio.NewWriter(nc) + } + + // Init buffered reader. + if c.ReadBufferSizeBytes > 0 { + reader = bufio.NewReaderSize(nc, c.ReadBufferSizeBytes) + } else { + reader = bufio.NewReader(nc) + } + + cn = &conn{ + nc: nc, + addr: addr, + rw: bufio.NewReadWriter(reader, writer), + c: c, + } + cn.extendDeadline() + return cn, nil +} + +func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error { + addr, err := c.selector.PickServer(item.Key) + if err != nil { + return err + } + cn, err := c.getConn(addr) + if err != nil { + return err + } + defer cn.condRelease(&err) + if err = fn(c, cn.rw, item); err != nil { + return err + } + return nil +} + +func (c *Client) FlushAll() error { + return c.selector.Each(c.flushAllFromAddr) +} + +// Get gets the item for the given key. ErrCacheMiss is returned for a +// memcache cache miss. The key must be at most 250 bytes in length. +func (c *Client) Get(key string, opts ...Option) (item *Item, err error) { + options := newOptions(opts...) + err = c.withKeyAddr(key, func(addr net.Addr) error { + return c.getFromAddr(addr, []string{key}, options, func(it *Item) { item = it }) + }) + if err == nil && item == nil { + err = ErrCacheMiss + } + return +} + +// Touch updates the expiry for the given key. The seconds parameter is either +// a Unix timestamp or, if seconds is less than 1 month, the number of seconds +// into the future at which time the item will expire. Zero means the item has +// no expiration time. ErrCacheMiss is returned if the key is not in the cache. +// The key must be at most 250 bytes in length. 
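For orientation, the vendored client's everyday surface is small: construct with `New`, then `Set`/`Get` items. A hedged sketch against a local memcached (the address and key are illustrative; `Set` appears further down in this file):

```go
package main

import (
	"fmt"

	"github.com/grafana/gomemcache/memcache"
)

func main() {
	mc := memcache.New("127.0.0.1:11211")
	defer mc.Close() // stops the idle-connection reaper goroutine

	if err := mc.Set(&memcache.Item{Key: "greeting", Value: []byte("hello"), Expiration: 60}); err != nil {
		panic(err)
	}

	it, err := mc.Get("greeting")
	if err == memcache.ErrCacheMiss {
		fmt.Println("expired or evicted")
		return
	} else if err != nil {
		panic(err)
	}
	fmt.Printf("%s = %s\n", it.Key, it.Value)
}
```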
+func (c *Client) Touch(key string, seconds int32) (err error) { + return c.withKeyAddr(key, func(addr net.Addr) error { + return c.touchFromAddr(addr, []string{key}, seconds) + }) +} + +func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) { + if !legalKey(key) { + return ErrMalformedKey + } + addr, err := c.selector.PickServer(key) + if err != nil { + return err + } + return fn(addr) +} + +func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) { + cn, err := c.getConn(addr) + if err != nil { + return err + } + defer cn.condRelease(&err) + return fn(cn.rw) +} + +func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error { + return c.withKeyAddr(key, func(addr net.Addr) error { + return c.withAddrRw(addr, fn) + }) +} + +func (c *Client) getFromAddr(addr net.Addr, keys []string, opts *Options, cb func(*Item)) error { + return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { + if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil { + return err + } + if err := rw.Flush(); err != nil { + return err + } + if err := c.parseGetResponse(rw.Reader, opts, cb); err != nil { + return err + } + return nil + }) +} + +// flushAllFromAddr send the flush_all command to the given addr +func (c *Client) flushAllFromAddr(addr net.Addr) error { + return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { + if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil { + return err + } + if err := rw.Flush(); err != nil { + return err + } + line, err := rw.ReadSlice('\n') + if err != nil { + return err + } + switch { + case bytes.Equal(line, resultOk): + break + default: + return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line)) + } + return nil + }) +} + +// ping sends the version command to the given addr +func (c *Client) ping(addr net.Addr) error { + return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { + if _, err := fmt.Fprintf(rw, "version\r\n"); err != nil { + return err + } + if err := rw.Flush(); err != nil { + return err + } + line, err := rw.ReadSlice('\n') + if err != nil { + return err + } + + switch { + case bytes.HasPrefix(line, versionPrefix): + break + default: + return fmt.Errorf("memcache: unexpected response line from ping: %q", string(line)) + } + return nil + }) +} + +func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error { + return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { + for _, key := range keys { + if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil { + return err + } + if err := rw.Flush(); err != nil { + return err + } + line, err := rw.ReadSlice('\n') + if err != nil { + return err + } + switch { + case bytes.Equal(line, resultTouched): + break + case bytes.Equal(line, resultNotFound): + return ErrCacheMiss + default: + return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line)) + } + } + return nil + }) +} + +// GetMulti is a batch version of Get. The returned map from keys to +// items may have fewer elements than the input slice, due to memcache +// cache misses. Each key must be at most 250 bytes in length. +// If no error is returned, the returned map will also be non-nil. +func (c *Client) GetMulti(keys []string, opts ...Option) (map[string]*Item, error) { + options := newOptions(opts...) 
+ + var lk sync.Mutex + m := make(map[string]*Item) + addItemToMap := func(it *Item) { + lk.Lock() + defer lk.Unlock() + m[it.Key] = it + } + + keyMap := make(map[net.Addr][]string) + for _, key := range keys { + if !legalKey(key) { + return nil, ErrMalformedKey + } + addr, err := c.selector.PickServer(key) + if err != nil { + return nil, err + } + keyMap[addr] = append(keyMap[addr], key) + } + + ch := make(chan error, buffered) + for addr, keys := range keyMap { + go func(addr net.Addr, keys []string) { + err := c.getFromAddr(addr, keys, options, addItemToMap) + ch <- err + }(addr, keys) + } + + var err error + for range keyMap { + if ge := <-ch; ge != nil { + err = ge + } + } + return m, err +} + +// parseGetResponse reads a GET response from r and calls cb for each +// read and allocated Item +func (c *Client) parseGetResponse(r *bufio.Reader, opts *Options, cb func(*Item)) error { + for { + line, err := r.ReadSlice('\n') + if err != nil { + return err + } + if bytes.Equal(line, resultEnd) { + return nil + } + it := new(Item) + size, err := scanGetResponseLine(line, it) + if err != nil { + return err + } + buffSize := size + 2 + buff := opts.Alloc.Get(buffSize) + it.Value = (*buff)[:buffSize] + _, err = io.ReadFull(r, it.Value) + if err != nil { + opts.Alloc.Put(buff) + return err + } + if !bytes.HasSuffix(it.Value, crlf) { + opts.Alloc.Put(buff) + return fmt.Errorf("memcache: corrupt get result read") + } + it.Value = it.Value[:size] + cb(it) + } +} + +// scanGetResponseLine populates it and returns the declared size of the item. +// It does not read the bytes of the item. +func scanGetResponseLine(line []byte, it *Item) (size int, err error) { + errf := func(line []byte) (int, error) { + return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line) + } + if !bytes.HasPrefix(line, valuePrefix) || !bytes.HasSuffix(line, []byte("\r\n")) { + return errf(line) + } + s := string(line[6 : len(line)-2]) + var rest string + var found bool + it.Key, rest, found = cut(s, ' ') + if !found { + return errf(line) + } + val, rest, found := cut(rest, ' ') + if !found { + return errf(line) + } + flags64, err := strconv.ParseUint(val, 10, 32) + if err != nil { + return errf(line) + } + it.Flags = uint32(flags64) + val, rest, found = cut(rest, ' ') + size64, err := strconv.ParseUint(val, 10, 32) + if err != nil { + return errf(line) + } + if !found { // final CAS ID is optional. + return int(size64), nil + } + it.casid, err = strconv.ParseUint(rest, 10, 64) + if err != nil { + return errf(line) + } + return int(size64), nil +} + +// Similar to strings.Cut in Go 1.18, but sep can only be 1 byte. +func cut(s string, sep byte) (before, after string, found bool) { + if i := strings.IndexByte(s, sep); i >= 0 { + return s[:i], s[i+1:], true + } + return s, "", false +} + +// Set writes the given item, unconditionally. +func (c *Client) Set(item *Item) error { + return c.onItem(item, (*Client).set) +} + +func (c *Client) set(rw *bufio.ReadWriter, item *Item) error { + return c.populateOne(rw, "set", item) +} + +// Add writes the given item, if no value already exists for its +// key. ErrNotStored is returned if that condition is not met. 
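`GetMulti` fans keys out per server (one goroutine per address, as above) and merges the results; missing keys are simply absent from the map rather than reported as errors. A usage sketch:

```go
package main

import (
	"fmt"

	"github.com/grafana/gomemcache/memcache"
)

func main() {
	mc := memcache.New("127.0.0.1:11211")
	defer mc.Close()

	keys := []string{"a", "b", "c"}
	items, err := mc.GetMulti(keys)
	if err != nil {
		panic(err) // one of the per-server errors, if any occurred
	}
	for _, key := range keys {
		if it, ok := items[key]; ok {
			fmt.Printf("hit  %s (%d bytes)\n", key, len(it.Value))
		} else {
			fmt.Printf("miss %s\n", key) // misses are absent, not errors
		}
	}
}
```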
+func (c *Client) Add(item *Item) error {
+	return c.onItem(item, (*Client).add)
+}
+
+func (c *Client) add(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "add", item)
+}
+
+// Replace writes the given item, but only if the server *does*
+// already hold data for this key.
+func (c *Client) Replace(item *Item) error {
+	return c.onItem(item, (*Client).replace)
+}
+
+func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "replace", item)
+}
+
+// CompareAndSwap writes the given item that was previously returned
+// by Get, if the value was neither modified nor evicted between the
+// Get and the CompareAndSwap calls. The item's Key should not change
+// between calls but all other item fields may differ. ErrCASConflict
+// is returned if the value was modified in between the
+// calls. ErrNotStored is returned if the value was evicted in between
+// the calls.
+func (c *Client) CompareAndSwap(item *Item) error {
+	return c.onItem(item, (*Client).cas)
+}
+
+func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "cas", item)
+}
+
+func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error {
+	if !legalKey(item.Key) {
+		return ErrMalformedKey
+	}
+	var err error
+	if verb == "cas" {
+		_, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n",
+			verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid)
+	} else {
+		_, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n",
+			verb, item.Key, item.Flags, item.Expiration, len(item.Value))
+	}
+	if err != nil {
+		return err
+	}
+	if _, err = rw.Write(item.Value); err != nil {
+		return err
+	}
+	if _, err := rw.Write(crlf); err != nil {
+		return err
+	}
+	if err := rw.Flush(); err != nil {
+		return err
+	}
+	line, err := rw.ReadSlice('\n')
+	if err != nil {
+		return err
+	}
+	switch {
+	case bytes.Equal(line, resultStored):
+		return nil
+	case bytes.Equal(line, resultNotStored):
+		return ErrNotStored
+	case bytes.Equal(line, resultExists):
+		return ErrCASConflict
+	case bytes.Equal(line, resultNotFound):
+		return ErrCacheMiss
+	}
+	return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line))
+}
+
+func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) {
+	_, err := fmt.Fprintf(rw, format, args...)
+	if err != nil {
+		return nil, err
+	}
+	if err := rw.Flush(); err != nil {
+		return nil, err
+	}
+	line, err := rw.ReadSlice('\n')
+	return line, err
+}
+
+func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error {
+	line, err := writeReadLine(rw, format, args...)
+	if err != nil {
+		return err
+	}
+	switch {
+	case bytes.Equal(line, resultOK):
+		return nil
+	case bytes.Equal(line, expect):
+		return nil
+	case bytes.Equal(line, resultNotStored):
+		return ErrNotStored
+	case bytes.Equal(line, resultExists):
+		return ErrCASConflict
+	case bytes.Equal(line, resultNotFound):
+		return ErrCacheMiss
+	}
+	return fmt.Errorf("memcache: unexpected response line: %q", string(line))
+}
+
+// Delete deletes the item with the provided key. The error ErrCacheMiss is
+// returned if the item didn't already exist in the cache.
+func (c *Client) Delete(key string) error {
+	return c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
+		return writeExpectf(rw, resultDeleted, "delete %s\r\n", key)
+	})
+}
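Because `Get` issues the `gets` command and records the CAS ID on the returned item, optimistic read-modify-write loops fall out naturally from `CompareAndSwap`. A hedged sketch (the function name is mine):

```go
package main

import (
	"errors"

	"github.com/grafana/gomemcache/memcache"
)

// appendSuffix retries until its update lands without racing another
// writer: ErrCASConflict means the item changed between our Get and
// CompareAndSwap, so re-read and try again.
func appendSuffix(mc *memcache.Client, key string, suffix []byte) error {
	for {
		it, err := mc.Get(key) // "gets" under the hood, so the CAS ID is populated
		if err != nil {
			return err
		}
		it.Value = append(it.Value, suffix...)
		err = mc.CompareAndSwap(it)
		if errors.Is(err, memcache.ErrCASConflict) {
			continue
		}
		return err // nil, ErrNotStored (evicted in between), or a transport error
	}
}

func main() {
	mc := memcache.New("127.0.0.1:11211")
	defer mc.Close()
	_ = mc.Set(&memcache.Item{Key: "greeting", Value: []byte("hello")})
	if err := appendSuffix(mc, "greeting", []byte("!")); err != nil {
		panic(err)
	}
}
```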
+
+// DeleteAll deletes all items in the cache.
+func (c *Client) DeleteAll() error {
+	return c.withKeyRw("", func(rw *bufio.ReadWriter) error {
+		return writeExpectf(rw, resultDeleted, "flush_all\r\n")
+	})
+}
+
+// Ping checks whether all instances are alive. It returns an error if any
+// of them is down.
+func (c *Client) Ping() error {
+	return c.selector.Each(c.ping)
+}
+
+// Increment atomically increments key by delta. The return value is
+// the new value after being incremented or an error. If the value
+// didn't exist in memcached the error is ErrCacheMiss. The value in
+// memcached must be a decimal number, or an error will be returned.
+// On 64-bit overflow, the new value wraps around.
+func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
+	return c.incrDecr("incr", key, delta)
+}
+
+// Decrement atomically decrements key by delta. The return value is
+// the new value after being decremented or an error. If the value
+// didn't exist in memcached the error is ErrCacheMiss. The value in
+// memcached must be a decimal number, or an error will be returned.
+// On underflow, the new value is capped at zero and does not wrap
+// around.
+func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
+	return c.incrDecr("decr", key, delta)
+}
+
+func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
+	var val uint64
+	err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
+		line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta)
+		if err != nil {
+			return err
+		}
+		switch {
+		case bytes.Equal(line, resultNotFound):
+			return ErrCacheMiss
+		case bytes.HasPrefix(line, resultClientErrorPrefix):
+			errMsg := line[len(resultClientErrorPrefix) : len(line)-2]
+			return errors.New("memcache: client error: " + string(errMsg))
+		}
+		val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	return val, err
+}
diff --git a/vendor/github.com/grafana/gomemcache/memcache/options.go b/vendor/github.com/grafana/gomemcache/memcache/options.go
new file mode 100644
index 00000000..ec0b3841
--- /dev/null
+++ b/vendor/github.com/grafana/gomemcache/memcache/options.go
@@ -0,0 +1,59 @@
+package memcache
+
+var nopAllocator = &defaultAllocator{}
+
+func newOptions(opts ...Option) *Options {
+	o := &Options{
+		Alloc: nopAllocator,
+	}
+
+	for _, opt := range opts {
+		opt(o)
+	}
+
+	return o
+}
+
+// Options are used to modify the behavior of an individual Get or GetMulti
+// call made by the Client. They are constructed by applying Option callbacks
+// passed to a Client method to a default Options instance.
+type Options struct {
+	Alloc Allocator
+}
+
+// Option is a callback used to modify the Options that a particular Client
+// method uses.
+type Option func(opts *Options)
+
+// WithAllocator creates a new Option that makes use of a specific memory Allocator
+// for result values (Item.Value) loaded from memcached.
+func WithAllocator(alloc Allocator) Option {
+	return func(opts *Options) {
+		opts.Alloc = alloc
+	}
+}
+
+// Allocator allows memory for memcached result values (Item.Value) to be managed by
+// callers of the Client instead of by the Client itself. For example, this can be
+// used by callers to implement arena-style memory management. The default implementation
+// used, when not otherwise overridden, uses `make` and relies on GC for cleanup.
+type Allocator interface {
+	// Get returns a byte slice with at least sz capacity.
Length of the slice is + // not guaranteed and so must be asserted by callers (the Client). + Get(sz int) *[]byte + // Put returns the byte slice to the underlying allocator. The Client will + // only call this method during error handling when allocated values are not + // returned to the caller as cache results. + Put(b *[]byte) +} + +type defaultAllocator struct{} + +func (d defaultAllocator) Get(sz int) *[]byte { + b := make([]byte, sz) + return &b +} + +func (d defaultAllocator) Put(_ *[]byte) { + // no-op +} diff --git a/vendor/github.com/grafana/gomemcache/memcache/selector.go b/vendor/github.com/grafana/gomemcache/memcache/selector.go new file mode 100644 index 00000000..89ad81e0 --- /dev/null +++ b/vendor/github.com/grafana/gomemcache/memcache/selector.go @@ -0,0 +1,129 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package memcache + +import ( + "hash/crc32" + "net" + "strings" + "sync" +) + +// ServerSelector is the interface that selects a memcache server +// as a function of the item's key. +// +// All ServerSelector implementations must be safe for concurrent use +// by multiple goroutines. +type ServerSelector interface { + // PickServer returns the server address that a given item + // should be shared onto. + PickServer(key string) (net.Addr, error) + Each(func(net.Addr) error) error +} + +// ServerList is a simple ServerSelector. Its zero value is usable. +type ServerList struct { + mu sync.RWMutex + addrs []net.Addr +} + +// staticAddr caches the Network() and String() values from any net.Addr. +type staticAddr struct { + ntw, str string +} + +func newStaticAddr(a net.Addr) net.Addr { + return &staticAddr{ + ntw: a.Network(), + str: a.String(), + } +} + +func (s *staticAddr) Network() string { return s.ntw } +func (s *staticAddr) String() string { return s.str } + +// SetServers changes a ServerList's set of servers at runtime and is +// safe for concurrent use by multiple goroutines. +// +// Each server is given equal weight. A server is given more weight +// if it's listed multiple times. +// +// SetServers returns an error if any of the server names fail to +// resolve. No attempt is made to connect to the server. If any error +// is returned, no changes are made to the ServerList. 
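The `Allocator` hook added in options.go lets heavy read paths recycle value buffers instead of allocating one per item; `Get` only promises capacity, because the client re-slices the buffer itself. A hedged `sync.Pool`-backed sketch (the type is mine, not part of the package):

```go
package main

import (
	"sync"

	"github.com/grafana/gomemcache/memcache"
)

// pooledAllocator recycles buffers; Get only guarantees capacity,
// since the client re-slices the returned buffer to the size it reads.
type pooledAllocator struct {
	pool sync.Pool
}

func (p *pooledAllocator) Get(sz int) *[]byte {
	if b, ok := p.pool.Get().(*[]byte); ok && cap(*b) >= sz {
		*b = (*b)[:sz]
		return b
	}
	b := make([]byte, sz)
	return &b
}

// Put is only called by the client on error paths; buffers handed out
// as results must be returned by the caller itself once done.
func (p *pooledAllocator) Put(b *[]byte) {
	p.pool.Put(b)
}

func main() {
	mc := memcache.New("127.0.0.1:11211")
	defer mc.Close()

	alloc := &pooledAllocator{}
	it, err := mc.Get("greeting", memcache.WithAllocator(alloc))
	if err == nil {
		_ = it.Value // backed by a pooled buffer
	}
}
```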
+func (ss *ServerList) SetServers(servers ...string) error { + naddr := make([]net.Addr, len(servers)) + for i, server := range servers { + if strings.Contains(server, "/") { + addr, err := net.ResolveUnixAddr("unix", server) + if err != nil { + return err + } + naddr[i] = newStaticAddr(addr) + } else { + tcpaddr, err := net.ResolveTCPAddr("tcp", server) + if err != nil { + return err + } + naddr[i] = newStaticAddr(tcpaddr) + } + } + + ss.mu.Lock() + defer ss.mu.Unlock() + ss.addrs = naddr + return nil +} + +// Each iterates over each server calling the given function +func (ss *ServerList) Each(f func(net.Addr) error) error { + ss.mu.RLock() + defer ss.mu.RUnlock() + for _, a := range ss.addrs { + if err := f(a); nil != err { + return err + } + } + return nil +} + +// keyBufPool returns []byte buffers for use by PickServer's call to +// crc32.ChecksumIEEE to avoid allocations. (but doesn't avoid the +// copies, which at least are bounded in size and small) +var keyBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 256) + return &b + }, +} + +func (ss *ServerList) PickServer(key string) (net.Addr, error) { + ss.mu.RLock() + defer ss.mu.RUnlock() + if len(ss.addrs) == 0 { + return nil, ErrNoServers + } + if len(ss.addrs) == 1 { + return ss.addrs[0], nil + } + bufp := keyBufPool.Get().(*[]byte) + n := copy(*bufp, key) + cs := crc32.ChecksumIEEE((*bufp)[:n]) + keyBufPool.Put(bufp) + + return ss.addrs[cs%uint32(len(ss.addrs))], nil +} diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/batch.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/batch.go index 8826b869..8681b67b 100644 --- a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/batch.go +++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/batch.go @@ -52,7 +52,7 @@ func newBatch(maxStreams int, entries ...api.Entry) *batch { // add an entry to the batch func (b *batch) add(entry api.Entry) error { - b.bytes += len(entry.Line) + b.bytes += entrySize(entry) // Append the entry to an already existing stream (if any) labels := labelsMapToString(entry.Labels, ReservedLabelTenantID) @@ -113,7 +113,7 @@ func (b *batch) sizeBytes() int { // sizeBytesAfter returns the size of the batch after the input entry // will be added to the batch itself func (b *batch) sizeBytesAfter(entry api.Entry) int { - return b.bytes + len(entry.Line) + return b.bytes + entrySize(entry) } // age of the batch since its creation @@ -146,3 +146,11 @@ func (b *batch) createPushRequest() (*logproto.PushRequest, int) { } return &req, entriesCount } + +func entrySize(entry api.Entry) int { + structuredMetadataSize := 0 + for _, label := range entry.StructuredMetadata { + structuredMetadataSize += label.Size() + } + return len(entry.Line) + structuredMetadataSize +} diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/client.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/client.go index 719d157d..4dfd1136 100644 --- a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/client.go +++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/client.go @@ -186,9 +186,6 @@ type Tripperware func(http.RoundTripper) http.RoundTripper // New makes a new Client. 
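`PickServer` shards keys with plain CRC32 modulo the address count; the pooled 256-byte buffer only avoids allocating the hash input (and never truncates, since legal keys are at most 250 bytes). The mapping itself reduces to:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// pickIndex is the core of ServerList.PickServer: the same key always
// maps to the same server while the address list is unchanged. Note
// this is modulo hashing, not a consistent-hash ring, so adding or
// removing a server remaps most keys.
func pickIndex(key string, numServers int) uint32 {
	return crc32.ChecksumIEEE([]byte(key)) % uint32(numServers)
}

func main() {
	for _, k := range []string{"a", "b", "c"} {
		fmt.Printf("%s -> server %d\n", k, pickIndex(k, 3))
	}
}
```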
func New(metrics *Metrics, cfg Config, maxStreams, maxLineSize int, maxLineSizeTruncate bool, logger log.Logger) (Client, error) { - if cfg.StreamLagLabels.String() != "" { - return nil, fmt.Errorf("client config stream_lag_labels is deprecated and the associated metric has been removed, stream_lag_labels: %+v", cfg.StreamLagLabels.String()) - } return newClient(metrics, cfg, maxStreams, maxLineSize, maxLineSizeTruncate, logger) } @@ -197,6 +194,9 @@ func newClient(metrics *Metrics, cfg Config, maxStreams, maxLineSize int, maxLin if cfg.URL.URL == nil { return nil, errors.New("client needs target URL") } + if metrics == nil { + return nil, errors.New("metrics must be instantiated") + } ctx, cancel := context.WithCancel(context.Background()) @@ -442,11 +442,10 @@ func (c *client) sendBatch(tenantID string, batch *batch) { func (c *client) send(ctx context.Context, tenantID string, buf []byte) (int, error) { ctx, cancel := context.WithTimeout(ctx, c.cfg.Timeout) defer cancel() - req, err := http.NewRequest("POST", c.cfg.URL.String(), bytes.NewReader(buf)) + req, err := http.NewRequestWithContext(ctx, "POST", c.cfg.URL.String(), bytes.NewReader(buf)) if err != nil { return -1, err } - req = req.WithContext(ctx) req.Header.Set("Content-Type", contentType) req.Header.Set("User-Agent", UserAgent) diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/client_writeto.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/client_writeto.go new file mode 100644 index 00000000..6fa549df --- /dev/null +++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/client_writeto.go @@ -0,0 +1,89 @@ +package client + +import ( + "fmt" + "sync" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/record" + + "github.com/grafana/loki/clients/pkg/promtail/api" + + "github.com/grafana/loki/pkg/ingester/wal" + "github.com/grafana/loki/pkg/util" +) + +// clientWriteTo implements a wal.WriteTo that re-builds entries with the stored series, and the received entries. After, +// sends each to the provided Client channel. +type clientWriteTo struct { + series map[chunks.HeadSeriesRef]model.LabelSet + seriesLock sync.RWMutex + + // seriesSegment keeps track of the last segment in which the series pointed by each key in this map was seen. Keeping + // this in a separate map avoids unnecessary contention. + // + // Even though it doesn't present a difference right now according to benchmarks, it will help when we introduce other + // calls from the wal.Watcher to the wal.WriteTo like `UpdateSeriesSegment`. 
+ seriesSegment map[chunks.HeadSeriesRef]int + seriesSegmentLock sync.RWMutex + + logger log.Logger + toClient chan<- api.Entry +} + +// newClientWriteTo creates a new clientWriteTo +func newClientWriteTo(toClient chan<- api.Entry, logger log.Logger) *clientWriteTo { + return &clientWriteTo{ + series: make(map[chunks.HeadSeriesRef]model.LabelSet), + seriesSegment: make(map[chunks.HeadSeriesRef]int), + toClient: toClient, + logger: logger, + } +} + +func (c *clientWriteTo) StoreSeries(series []record.RefSeries, segment int) { + c.seriesLock.Lock() + defer c.seriesLock.Unlock() + c.seriesSegmentLock.Lock() + defer c.seriesSegmentLock.Unlock() + for _, seriesRec := range series { + c.seriesSegment[seriesRec.Ref] = segment + labels := util.MapToModelLabelSet(seriesRec.Labels.Map()) + c.series[seriesRec.Ref] = labels + } +} + +// SeriesReset will delete all cache entries that were last seen in segments numbered equal or lower than segmentNum +func (c *clientWriteTo) SeriesReset(segmentNum int) { + c.seriesLock.Lock() + defer c.seriesLock.Unlock() + c.seriesSegmentLock.Lock() + defer c.seriesSegmentLock.Unlock() + for k, v := range c.seriesSegment { + if v <= segmentNum { + level.Debug(c.logger).Log("msg", fmt.Sprintf("reclaiming series under segment %d", segmentNum)) + delete(c.seriesSegment, k) + delete(c.series, k) + } + } +} + +func (c *clientWriteTo) AppendEntries(entries wal.RefEntries) error { + var entry api.Entry + c.seriesLock.RLock() + l, ok := c.series[entries.Ref] + c.seriesLock.RUnlock() + if ok { + entry.Labels = l + for _, e := range entries.Entries { + entry.Entry = e + c.toClient <- entry + } + } else { + level.Debug(c.logger).Log("msg", "series for entry not found") + } + return nil +} diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/config.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/config.go index d38aa59b..ab36353b 100644 --- a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/config.go +++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/config.go @@ -23,6 +23,10 @@ const ( // Config describes configuration for an HTTP pusher client. type Config struct { + // Note even though the command line flag arguments which use this config + // are deprecated, this struct is still the primary way to configure + // a promtail client + Name string `yaml:"name,omitempty"` URL flagext.URLValue BatchWait time.Duration `yaml:"batchwait"` @@ -44,8 +48,6 @@ type Config struct { // 429 'Too Many Requests' response from the distributor. Helps // prevent HOL blocking in multitenant deployments. DropRateLimitedBatches bool `yaml:"drop_rate_limited_batches"` - - StreamLagLabels flagext.StringSliceCSV `yaml:"stream_lag_labels" doc:"deprecated"` } // RegisterFlags with prefix registers flags where every name is prefixed by @@ -91,10 +93,18 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { } } + cfg.Client = config.DefaultHTTPClientConfig + if err := unmarshal(&cfg); err != nil { return err } + // explicitly call Validate on HTTPClientConfig as it's UnmarshalYAML + // method doesn't get invoked given that it's not a pointer. 
+ if err := cfg.Client.Validate(); err != nil { + return err + } + *c = Config(cfg) return nil } diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/logger.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/logger.go index bee9fadf..890d5117 100644 --- a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/logger.go +++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/logger.go @@ -9,9 +9,12 @@ import ( "github.com/fatih/color" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "gopkg.in/yaml.v2" "github.com/grafana/loki/clients/pkg/promtail/api" + "github.com/grafana/loki/clients/pkg/promtail/limit" + "github.com/grafana/loki/clients/pkg/promtail/wal" ) var ( @@ -37,7 +40,7 @@ type logger struct { // NewLogger creates a new client logger that logs entries instead of sending them. func NewLogger(metrics *Metrics, log log.Logger, cfgs ...Config) (Client, error) { // make sure the clients config is valid - c, err := NewMulti(metrics, log, 0, 0, false, cfgs...) + c, err := NewManager(metrics, log, limit.Config{}, prometheus.NewRegistry(), wal.Config{}, NilNotifier, cfgs...) if err != nil { return nil, err } diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/manager.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/manager.go index c6df8391..84dc48de 100644 --- a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/manager.go +++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/manager.go @@ -6,12 +6,37 @@ import ( "sync" "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/grafana/loki/clients/pkg/promtail/api" + "github.com/grafana/loki/clients/pkg/promtail/limit" "github.com/grafana/loki/clients/pkg/promtail/wal" ) +// WriterEventsNotifier implements a notifier that's received by the Manager, to which wal.Watcher can subscribe for +// writer events. +type WriterEventsNotifier interface { + SubscribeCleanup(subscriber wal.CleanupEventSubscriber) + SubscribeWrite(subscriber wal.WriteEventSubscriber) +} + +var ( + // NilNotifier is a no-op WriterEventsNotifier. + NilNotifier = nilNotifier{} +) + +// nilNotifier implements WriterEventsNotifier with no-ops callbacks. +type nilNotifier struct{} + +func (n nilNotifier) SubscribeCleanup(_ wal.CleanupEventSubscriber) {} + +func (n nilNotifier) SubscribeWrite(_ wal.WriteEventSubscriber) {} + +type Stoppable interface { + Stop() +} + // Manager manages remote write client instantiation, and connects the related components to orchestrate the flow of api.Entry // from the scrape targets, to the remote write clients themselves. // @@ -19,7 +44,9 @@ import ( // work, tracked in https://github.com/grafana/loki/issues/8197, this Manager will be responsible for instantiating all client // types: Logger, Multi and WAL. 
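// For reference, a construction sketch mirroring the NewLogger change above
// (`metrics`, `logger`, and `cfgs` are placeholders):
//
//	m, err := NewManager(metrics, logger, limit.Config{}, prometheus.NewRegistry(), wal.Config{}, NilNotifier, cfgs...)
//
// With wal.Config{}.Enabled left false, the Manager takes over the role of the
// removed MultiClient and fans entries out to every inner client.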
 type Manager struct {
-	clients []Client
+	name        string
+	clients     []Client
+	walWatchers []Stoppable

 	entries chan api.Entry

 	once sync.Once
@@ -28,17 +55,20 @@ type Manager struct {
 }

 // NewManager creates a new Manager
-func NewManager(metrics *Metrics, logger log.Logger, maxStreams, maxLineSize int, maxLineSizeTruncate bool, reg prometheus.Registerer, walCfg wal.Config, clientCfgs ...Config) (*Manager, error) {
-	// TODO: refactor this to instantiate all clients types
+func NewManager(metrics *Metrics, logger log.Logger, limits limit.Config, reg prometheus.Registerer, walCfg wal.Config, notifier WriterEventsNotifier, clientCfgs ...Config) (*Manager, error) {
 	var fake struct{}

+	watcherMetrics := wal.NewWatcherMetrics(reg)
+
 	if len(clientCfgs) == 0 {
-		return nil, fmt.Errorf("at least one client config should be provided")
+		return nil, fmt.Errorf("at least one client config must be provided")
 	}
+
 	clientsCheck := make(map[string]struct{})
 	clients := make([]Client, 0, len(clientCfgs))
+	watchers := make([]Stoppable, 0, len(clientCfgs))
 	for _, cfg := range clientCfgs {
-		client, err := New(metrics, cfg, maxStreams, maxLineSize, maxLineSizeTruncate, logger)
+		client, err := New(metrics, cfg, limits.MaxStreams, limits.MaxLineSize.Val(), limits.MaxLineSizeTruncate, logger)
 		if err != nil {
 			return nil, err
 		}
@@ -50,23 +80,66 @@ func NewManager(metrics *Metrics, logger log.Logger, maxStreams, maxLineSize int
 		clientsCheck[client.Name()] = fake
 		clients = append(clients, client)
-	}
+		if walCfg.Enabled {
+			// Create and launch wal watcher for this client
+
+			// add some context information for the logger the watcher uses
+			wlog := log.With(logger, "client", client.Name())
+
+			writeTo := newClientWriteTo(client.Chan(), wlog)
+			// subscribe watcher's wal.WriteTo to writer events. This will make the writer trigger the cleanup of the wal.WriteTo
+			// series cache whenever a segment is deleted.
+			notifier.SubscribeCleanup(writeTo)
+
+			watcher := wal.NewWatcher(walCfg.Dir, client.Name(), watcherMetrics, writeTo, wlog, walCfg.WatchConfig)
+			// subscribe watcher to wal write events
+			notifier.SubscribeWrite(watcher)
+
+			level.Debug(logger).Log("msg", "starting WAL watcher for client", "client", client.Name())
+			watcher.Start()
+
+			watchers = append(watchers, watcher)
+		}
+	}

 	manager := &Manager{
-		clients: clients,
-		entries: make(chan api.Entry),
+		clients:     clients,
+		walWatchers: watchers,
+		entries:     make(chan api.Entry),
+	}
+	if walCfg.Enabled {
+		manager.name = "wal"
+		manager.startWithConsume()
+	} else {
+		manager.name = "multi"
+		manager.startWithForward()
 	}
-	manager.start()
 	return manager, nil
 }

-func (m *Manager) start() {
+// startWithConsume starts the main manager routine, which reads and discards entries from the exposed channel.
+// This is necessary because, to treat the WAL-enabled manager the same way as the WAL-disabled one, the processing pipeline
+// sends entries both to the WAL writer and to the channel exposed by the manager. When the WAL is enabled, these entries
+// are not used, since they are read back from the WAL instead, so we need a routine that just reads the entries received
+// through the channel and discards them, to avoid blocking the sending side.
+func (m *Manager) startWithConsume() {
+	m.wg.Add(1)
+	go func() {
+		defer m.wg.Done()
+		// discard read entries
+		//nolint:revive
+		for range m.entries {
+		}
+	}()
+}

+// startWithForward starts the main manager routine, which reads entries from the exposed channel and forwards them,
+// fanning out across all inner clients, as contrasted in the sketch below.
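// Illustrative contrast of the two modes (a sketch, not part of this diff):
//
//	// forward ("multi"): every entry read is replicated to each client
//	for e := range entries {
//		for _, c := range clients {
//			c.Chan() <- e
//		}
//	}
//
//	// consume ("wal"): entries are read and dropped so senders never block;
//	// clients receive their data from the WAL watchers instead
//	for range entries {
//	}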
+func (m *Manager) startWithForward() { m.wg.Add(1) go func() { defer m.wg.Done() - // keep reading received entries for e := range m.entries { - // then fanout to every remote write client for _, c := range m.clients { c.Chan() <- e } @@ -82,8 +155,8 @@ func (m *Manager) StopNow() { func (m *Manager) Name() string { var sb strings.Builder - // name contains wal since manager is used as client only when WAL enabled for now - sb.WriteString("wal:") + sb.WriteString(m.name) + sb.WriteString(":") for i, c := range m.clients { sb.WriteString(c.Name()) if i != len(m.clients)-1 { @@ -101,6 +174,10 @@ func (m *Manager) Stop() { // first stop the receiving channel m.once.Do(func() { close(m.entries) }) m.wg.Wait() + // close wal watchers + for _, walWatcher := range m.walWatchers { + walWatcher.Stop() + } // close clients for _, c := range m.clients { c.Stop() diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/multi.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/multi.go deleted file mode 100644 index 35eb05fa..00000000 --- a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/multi.go +++ /dev/null @@ -1,96 +0,0 @@ -package client - -import ( - "errors" - "fmt" - "strings" - "sync" - - "github.com/go-kit/log" - - "github.com/grafana/loki/clients/pkg/promtail/api" -) - -// MultiClient is client pushing to one or more loki instances. -type MultiClient struct { - clients []Client - entries chan api.Entry - wg sync.WaitGroup - - once sync.Once -} - -// NewMulti creates a new client -func NewMulti(metrics *Metrics, logger log.Logger, maxStreams, maxLineSize int, maxLineSizeTruncate bool, cfgs ...Config) (Client, error) { - var fake struct{} - - if len(cfgs) == 0 { - return nil, errors.New("at least one client config should be provided") - } - clientsCheck := make(map[string]struct{}) - clients := make([]Client, 0, len(cfgs)) - for _, cfg := range cfgs { - client, err := New(metrics, cfg, maxStreams, maxLineSize, maxLineSizeTruncate, logger) - if err != nil { - return nil, err - } - - // Don't allow duplicate clients, we have client specific metrics that need at least one unique label value (name). 
- if _, ok := clientsCheck[client.Name()]; ok { - return nil, fmt.Errorf("duplicate client configs are not allowed, found duplicate for URL: %s", cfg.URL) - } - - clientsCheck[client.Name()] = fake - clients = append(clients, client) - } - multi := &MultiClient{ - clients: clients, - entries: make(chan api.Entry), - } - multi.start() - return multi, nil -} - -func (m *MultiClient) start() { - m.wg.Add(1) - go func() { - defer m.wg.Done() - for e := range m.entries { - for _, c := range m.clients { - c.Chan() <- e - } - } - }() -} - -func (m *MultiClient) Chan() chan<- api.Entry { - return m.entries -} - -// Stop implements Client -func (m *MultiClient) Stop() { - m.once.Do(func() { close(m.entries) }) - m.wg.Wait() - for _, c := range m.clients { - c.Stop() - } -} - -// StopNow implements Client -func (m *MultiClient) StopNow() { - for _, c := range m.clients { - c.StopNow() - } -} - -func (m *MultiClient) Name() string { - var sb strings.Builder - sb.WriteString("multi:") - for i, c := range m.clients { - sb.WriteString(c.Name()) - if i != len(m.clients)-1 { - sb.WriteString(",") - } - } - return sb.String() -} diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/test_server.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/client/test_server.go deleted file mode 100644 index 3883173f..00000000 --- a/vendor/github.com/grafana/loki/clients/pkg/promtail/client/test_server.go +++ /dev/null @@ -1,40 +0,0 @@ -package client - -import ( - "math" - "net/http" - "net/http/httptest" - - "github.com/grafana/loki/pkg/logproto" - "github.com/grafana/loki/pkg/util" -) - -type receivedReq struct { - tenantID string - pushReq logproto.PushRequest -} - -// newTestMoreWriteServer creates a new httpserver.Server that can handle remote write request. When a request is handled, -// the received entries are written to receivedChan, and status is responded. 
-func newTestRemoteWriteServer(receivedChan chan receivedReq, status int) *httptest.Server { - server := httptest.NewServer(createServerHandler(receivedChan, status)) - return server -} - -func createServerHandler(receivedReqsChan chan receivedReq, receivedOKStatus int) http.HandlerFunc { - return func(rw http.ResponseWriter, req *http.Request) { - // Parse the request - var pushReq logproto.PushRequest - if err := util.ParseProtoReader(req.Context(), req.Body, int(req.ContentLength), math.MaxInt32, &pushReq, util.RawSnappy); err != nil { - rw.WriteHeader(500) - return - } - - receivedReqsChan <- receivedReq{ - tenantID: req.Header.Get("X-Scope-OrgID"), - pushReq: pushReq, - } - - rw.WriteHeader(receivedOKStatus) - } -} diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/limit/config.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/limit/config.go new file mode 100644 index 00000000..02589afd --- /dev/null +++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/limit/config.go @@ -0,0 +1,27 @@ +package limit + +import ( + "flag" + + "github.com/grafana/loki/pkg/util/flagext" +) + +type Config struct { + ReadlineRate float64 `mapstructure:"readline_rate" yaml:"readline_rate" json:"readline_rate"` + ReadlineBurst int `mapstructure:"readline_burst" yaml:"readline_burst" json:"readline_burst"` + ReadlineRateEnabled bool `mapstructure:"readline_rate_enabled,omitempty" yaml:"readline_rate_enabled,omitempty" json:"readline_rate_enabled"` + ReadlineRateDrop bool `mapstructure:"readline_rate_drop,omitempty" yaml:"readline_rate_drop,omitempty" json:"readline_rate_drop"` + MaxStreams int `mapstructure:"max_streams" yaml:"max_streams" json:"max_streams"` + MaxLineSize flagext.ByteSize `mapstructure:"max_line_size" yaml:"max_line_size" json:"max_line_size"` + MaxLineSizeTruncate bool `mapstructure:"max_line_size_truncate" yaml:"max_line_size_truncate" json:"max_line_size_truncate"` +} + +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.Float64Var(&cfg.ReadlineRate, prefix+"limit.readline-rate", 10000, "The rate limit in log lines per second that this instance of Promtail may push to Loki.") + f.IntVar(&cfg.ReadlineBurst, prefix+"limit.readline-burst", 10000, "The cap in the quantity of burst lines that this instance of Promtail may push to Loki.") + f.BoolVar(&cfg.ReadlineRateEnabled, prefix+"limit.readline-rate-enabled", false, "When true, enforces rate limiting on this instance of Promtail.") + f.BoolVar(&cfg.ReadlineRateDrop, prefix+"limit.readline-rate-drop", true, "When true, exceeding the rate limit causes this instance of Promtail to discard log lines, rather than sending them to Loki.") + f.IntVar(&cfg.MaxStreams, prefix+"max-streams", 0, "Maximum number of active streams. 0 to disable.") + f.Var(&cfg.MaxLineSize, prefix+"max-line-size", "Maximum log line byte size allowed without dropping. Example: 256kb, 2M. 0 to disable.") + f.BoolVar(&cfg.MaxLineSizeTruncate, prefix+"max-line-size-truncate", false, "Whether to truncate lines that exceed max_line_size. 
No effect if max_line_size is disabled")
+}
diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/config.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/config.go
index e01090cb..a3ba686f 100644
--- a/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/config.go
+++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/config.go
@@ -8,6 +8,12 @@ const (
 	defaultMaxSegmentAge = time.Hour
 )

+// DefaultWatchConfig holds the opinionated defaults for operating the Watcher.
+var DefaultWatchConfig = WatchConfig{
+	MinReadFrequency: time.Millisecond * 250,
+	MaxReadFrequency: time.Second,
+}
+
 // Config contains all WAL-related settings.
 type Config struct {
 	// Whether WAL-support should be enabled.
@@ -23,12 +29,31 @@ type Config struct {
 	//
 	// Note that this functionality will likely be deprecated in favour of a programmatic cleanup mechanism.
 	MaxSegmentAge time.Duration `yaml:"cleanSegmentsOlderThan"`
+
+	WatchConfig WatchConfig `yaml:"watchConfig"`
+}
+
+// WatchConfig allows the user to configure the Watcher.
+//
+// For the read frequency settings, the Watcher polls the WAL for new records with two mechanisms: first, it gets
+// notified by the Writer when the WAL is written; second, it has a timer that fires every so often. The latter
+// implements an exponential back-off strategy to prevent the Watcher from reading too often when there's no new
+// data.
+type WatchConfig struct {
+	// MinReadFrequency controls the minimum interval at which the Watcher polls the WAL for new records. If a poll is
+	// successful, the interval stays the same; if not, it is increased using an exponential backoff.
+	MinReadFrequency time.Duration `yaml:"minReadFrequency"`

+	// MaxReadFrequency controls the maximum interval at which the Watcher polls the WAL for new records. As mentioned
+	// above, it caps the polling interval to prevent the exponential backoff from making it too long.
+	MaxReadFrequency time.Duration `yaml:"maxReadFrequency"`
 }

 // UnmarshalYAML implement YAML Unmarshaler
 func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	// Apply defaults
 	c.MaxSegmentAge = defaultMaxSegmentAge
+	c.WatchConfig = DefaultWatchConfig
 	type plain Config
 	return unmarshal((*plain)(c))
 }
diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/reader.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/reader.go
index 406a2b71..b19b2bbe 100644
--- a/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/reader.go
+++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/reader.go
@@ -14,11 +14,11 @@ import (

 // ReadWAL will read all entries in the WAL located under dir. Mainly used for testing
 func ReadWAL(dir string) ([]api.Entry, error) {
-	reader, close, err := walUtils.NewWalReader(dir, -1)
+	reader, closeFn, err := walUtils.NewWalReader(dir, -1)
 	if err != nil {
 		return nil, err
 	}
-	defer func() { close.Close() }()
+	defer func() { closeFn.Close() }()

 	seenSeries := make(map[uint64]model.LabelSet)
 	seenEntries := []api.Entry{}
diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/timer.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/timer.go
new file mode 100644
index 00000000..bd646cc9
--- /dev/null
+++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/timer.go
@@ -0,0 +1,50 @@
+package wal
+
+import "time"
+
+// backoffTimer is a time.Timer that allows one to move between a minimum and maximum interval, using an exponential backoff
+// strategy.
+// It safely re-uses just one time.Timer instance internally.
+type backoffTimer struct {
+	timer          *time.Timer
+	curr, min, max time.Duration
+	C              <-chan time.Time
+}
+
+func newBackoffTimer(min, max time.Duration) *backoffTimer {
+	// note that the first timer created here is stopped without ever being consumed; since the timer instance is
+	// recycled on every reset/backoff, we can keep handing out the same channel
+	t := time.NewTimer(min)
+	return &backoffTimer{
+		timer: t,
+		min:   min,
+		max:   max,
+		curr:  min,
+		C:     t.C,
+	}
+}
+
+func (bt *backoffTimer) backoff() {
+	bt.curr = bt.curr * 2
+	if bt.curr > bt.max {
+		bt.curr = bt.max
+	}
+	bt.recycle()
+}
+
+func (bt *backoffTimer) reset() {
+	bt.curr = bt.min
+	bt.recycle()
+}
+
+// recycle stops and attempts to drain the time.Timer's underlying channel, in order to fully recycle the instance.
+func (bt *backoffTimer) recycle() {
+	if !bt.timer.Stop() {
+		// attempt to drain the timer's channel if it has expired
+		select {
+		case <-bt.timer.C:
+		default:
+		}
+	}
+	// safe to call Reset after stopping and draining the timer
+	bt.timer.Reset(bt.curr)
+}
diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/wal.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/wal.go
index 7129f282..af1fa7e3 100644
--- a/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/wal.go
+++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/wal.go
@@ -37,7 +37,7 @@ type wrapper struct {
 func New(cfg Config, log log.Logger, registerer prometheus.Registerer) (WAL, error) {
 	// TODO: We should fine-tune the WAL instantiated here to allow some buffering of written entries, but not written to disk
 	// yet. This will attest for the lack of buffering in the channel Writer exposes.
-	tsdbWAL, err := wlog.NewSize(log, registerer, cfg.Dir, wlog.DefaultSegmentSize, false)
+	tsdbWAL, err := wlog.NewSize(log, registerer, cfg.Dir, wlog.DefaultSegmentSize, wlog.CompressionNone)
 	if err != nil {
 		return nil, fmt.Errorf("failde to create tsdb WAL: %w", err)
 	}
@@ -86,10 +86,7 @@ func (w *wrapper) logBatched(record *wal.Record) error {
 	*seriesBuf = record.EncodeSeries(*seriesBuf)
 	*entriesBuf = record.EncodeEntries(wal.CurrentEntriesRec, *entriesBuf)
 	// Always write series then entries
-	if err := w.wal.Log(*seriesBuf, *entriesBuf); err != nil {
-		return err
-	}
-	return nil
+	return w.wal.Log(*seriesBuf, *entriesBuf)
 }

 // logSingle logs to the WAL series and records in separate WAL operation. This causes a page flush after each operation.
diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/watcher.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/watcher.go
new file mode 100644
index 00000000..3e8719a2
--- /dev/null
+++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/watcher.go
@@ -0,0 +1,353 @@
+package wal
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"strconv"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/pkg/errors"
+	"github.com/prometheus/prometheus/tsdb/record"
+	"github.com/prometheus/prometheus/tsdb/wlog"
+
+	"github.com/grafana/loki/pkg/ingester/wal"
+)
+
+const (
+	segmentCheckPeriod = 100 * time.Millisecond
+
+	// debug flag used for developing and testing the watcher code. Using this instead of level.Debug avoids the
+	// log-level check inside the logger code, and just lets the compiler omit this code.
+	debug = false
+)
+
+// Based on the implementation of the prometheus WAL watcher
+// https://github.com/prometheus/prometheus/blob/main/tsdb/wlog/watcher.go.
+// Includes some changes to make it suitable for log WAL entries; additionally, the WriteTo surface has been implemented
+// according to the actions necessary for Promtail's WAL.
+
+// Reader is a dependency interface to inject generic WAL readers into the Watcher.
+type Reader interface {
+	Next() bool
+	Err() error
+	Record() []byte
+}
+
+// WriteCleanup is responsible for cleaning up resources used in the process of reading the WAL.
+type WriteCleanup interface {
+	// SeriesReset is called to notify that segments have been deleted. The argument of the call
+	// means that all segments numbered lower than or equal to segmentNum are safe to be reclaimed.
+	SeriesReset(segmentNum int)
+}
+
+// WriteTo is an interface used by the Watcher to send the samples it's read from the WAL on to
+// somewhere else, or clean them up. It's the intermediary between all information read by the Watcher
+// and the final destination.
+//
+// Based on https://github.com/prometheus/prometheus/blob/main/tsdb/wlog/watcher.go#L46
+type WriteTo interface {
+	// WriteCleanup is used to allow the Watcher to react upon being notified of WAL cleanup events, such as segments
+	// being reclaimed.
+	WriteCleanup
+
+	// StoreSeries is called when series are found in WAL entries by the watcher, along with the segmentNum they were
+	// found in.
+	StoreSeries(series []record.RefSeries, segmentNum int)
+
+	AppendEntries(entries wal.RefEntries) error
+}
+
+type Watcher struct {
+	// id identifies the Watcher. Used when one Watcher is instantiated per remote write client, to be able to track to whom
+	// the metric/log line corresponds.
+	id string
+
+	actions    WriteTo
+	readNotify chan struct{}
+	done       chan struct{}
+	quit       chan struct{}
+	walDir     string
+	logger     log.Logger
+	MaxSegment int
+
+	metrics     *WatcherMetrics
+	minReadFreq time.Duration
+	maxReadFreq time.Duration
+}
+
+// NewWatcher creates a new Watcher.
+func NewWatcher(walDir, id string, metrics *WatcherMetrics, writeTo WriteTo, logger log.Logger, config WatchConfig) *Watcher {
+	return &Watcher{
+		walDir:      walDir,
+		id:          id,
+		actions:     writeTo,
+		readNotify:  make(chan struct{}),
+		quit:        make(chan struct{}),
+		done:        make(chan struct{}),
+		MaxSegment:  -1,
+		logger:      logger,
+		metrics:     metrics,
+		minReadFreq: config.MinReadFrequency,
+		maxReadFreq: config.MaxReadFrequency,
+	}
+}
+
+// Start runs the watcher main loop.
+func (w *Watcher) Start() {
+	w.metrics.watchersRunning.WithLabelValues().Inc()
+	go w.mainLoop()
+}
+
+// mainLoop retries when there's an error reading a specific segment or advancing one, but waits longer in-between
+// retries.
+func (w *Watcher) mainLoop() {
+	defer close(w.done)
+	for !isClosed(w.quit) {
+		if err := w.run(); err != nil {
+			level.Error(w.logger).Log("msg", "error tailing WAL", "err", err)
+		}
+
+		select {
+		case <-w.quit:
+			return
+		case <-time.After(5 * time.Second):
+		}
+	}
+}
+
+// Run the watcher, which will tail the WAL until the quit channel is closed
+// or an error case is hit.
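// Lifecycle sketch, mirroring how the client Manager wires up a Watcher (the
// names are placeholders):
//
//	w := NewWatcher(walDir, "client-1", metrics, writeTo, logger, DefaultWatchConfig)
//	notifier.SubscribeWrite(w) // WAL writes now trigger segment reads
//	w.Start()
//	defer w.Stop()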
+func (w *Watcher) run() error {
+	_, lastSegment, err := w.firstAndLast()
+	if err != nil {
+		return fmt.Errorf("wal.Segments: %w", err)
+	}
+
+	currentSegment := lastSegment
+	level.Debug(w.logger).Log("msg", "Tailing WAL", "currentSegment", currentSegment, "lastSegment", lastSegment)
+	for !isClosed(w.quit) {
+		w.metrics.currentSegment.WithLabelValues(w.id).Set(float64(currentSegment))
+		level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment)
+
+		// On start, we have a pointer to what is the latest segment. On subsequent calls to this function,
+		// currentSegment will have been incremented, and we should open that segment.
+		if err := w.watch(currentSegment); err != nil {
+			return err
+		}
+
+		// For testing: stop when you hit a specific segment.
+		if currentSegment == w.MaxSegment {
+			return nil
+		}
+
+		currentSegment++
+	}
+
+	return nil
+}
+
+// watch will start reading from the segment identified by segmentNum. If an EOF is reached, it will keep
+// reading for more WAL records with a wlog.LiveReader. Periodically, it will check if there's a new segment, and if so,
+// read the remainder of the current one and return.
+func (w *Watcher) watch(segmentNum int) error {
+	segment, err := wlog.OpenReadSegment(wlog.SegmentName(w.walDir, segmentNum))
+	if err != nil {
+		return err
+	}
+	defer segment.Close()
+
+	reader := wlog.NewLiveReader(w.logger, nil, segment)
+
+	readTimer := newBackoffTimer(w.minReadFreq, w.maxReadFreq)
+
+	segmentTicker := time.NewTicker(segmentCheckPeriod)
+	defer segmentTicker.Stop()
+
+	for {
+		select {
+		case <-w.quit:
+			return nil
+
+		case <-segmentTicker.C:
+			_, last, err := w.firstAndLast()
+			if err != nil {
+				return fmt.Errorf("segments: %w", err)
+			}
+
+			// Check if new segments exist.
+			if last <= segmentNum {
+				continue
+			}
+
+			// Since we know last > segmentNum, there must be a new segment. Read the remainder of the segmentNum segment,
+			// and return from `watch` to read the next one.
+			_, err = w.readSegment(reader, segmentNum)
+			if debug {
+				level.Warn(w.logger).Log("msg", "Error reading segment inside segmentTicker", "segment", segmentNum, "read", reader.Offset(), "err", err)
+			}
+
+			// io.EOF errors are non-fatal since we are tailing the wal
+			if errors.Cause(err) != io.EOF {
+				return err
+			}
+
+			// return after reading the whole segment, so a new LiveReader is created for the newly created segment
+			return nil
+
+		// the cases below will unblock the select, and execute the block below
+		// https://github.com/golang/go/issues/23196#issuecomment-353169837
+		case <-readTimer.C:
+			w.metrics.segmentRead.WithLabelValues(w.id, "timer").Inc()
+			if debug {
+				level.Debug(w.logger).Log("msg", "Segment read triggered by backup timer", "segment", segmentNum)
+			}
+		case <-w.readNotify:
+			w.metrics.segmentRead.WithLabelValues(w.id, "notification").Inc()
+		}
+
+		// read from the currently open segment
+		ok, err := w.readSegment(reader, segmentNum)
+		if debug {
+			level.Warn(w.logger).Log("msg", "Error reading segment inside readTicker", "segment", segmentNum, "read", reader.Offset(), "err", err)
+		}
+
+		// io.EOF errors are non-fatal since we are tailing the wal
+		if errors.Cause(err) != io.EOF {
+			return err
+		}
+
+		if ok {
+			// read ok, reset readTimer to the minimum interval
+			readTimer.reset()
+			continue
+		}
+
+		readTimer.backoff()
+	}
+}
+
+// Read entries from a segment, decode them and dispatch them.
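// To make the read backoff concrete: with DefaultWatchConfig (min 250ms, max 1s),
// an idle tail waits 250ms, 500ms, 1s, 1s, ... between reads, and any successful
// read resets the interval (sketch):
//
//	t := newBackoffTimer(250*time.Millisecond, time.Second)
//	t.backoff() // curr: 500ms
//	t.backoff() // curr: 1s, capped at max
//	t.reset()   // curr: back to 250ms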
+func (w *Watcher) readSegment(r *wlog.LiveReader, segmentNum int) (bool, error) {
+	var readData bool
+
+	for r.Next() && !isClosed(w.quit) {
+		rec := r.Record()
+		w.metrics.recordsRead.WithLabelValues(w.id).Inc()
+		read, err := w.decodeAndDispatch(rec, segmentNum)
+		// keep true if data was read at least once
+		readData = readData || read
+		if err != nil {
+			return readData, errors.Wrapf(err, "error decoding record")
+		}
+	}
+	return readData, errors.Wrapf(r.Err(), "segment %d: %v", segmentNum, r.Err())
+}
+
+// decodeAndDispatch first decodes a WAL record. Upon reading either Series or Entries from the WAL record, it calls the
+// appropriate callbacks in the writeTo.
+func (w *Watcher) decodeAndDispatch(b []byte, segmentNum int) (bool, error) {
+	var readData bool
+
+	rec := recordPool.GetRecord()
+	if err := wal.DecodeRecord(b, rec); err != nil {
+		w.metrics.recordDecodeFails.WithLabelValues(w.id).Inc()
+		return readData, err
+	}
+
+	// First process all series to ensure we don't write entries to non-existent series.
+	var firstErr error
+	w.actions.StoreSeries(rec.Series, segmentNum)
+	readData = true
+
+	for _, entries := range rec.RefEntries {
+		if err := w.actions.AppendEntries(entries); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+
+	return readData, firstErr
+}
+
+func (w *Watcher) Stop() {
+	// first close the quit channel to order the mainLoop routine to stop
+	close(w.quit)
+	// upon calling stop, wait for the mainLoop execution to finish
+	<-w.done
+
+	w.metrics.watchersRunning.WithLabelValues().Dec()
+}
+
+// firstAndLast finds the first and last segment number for a WAL directory.
+func (w *Watcher) firstAndLast() (int, int, error) {
+	refs, err := readSegmentNumbers(w.walDir)
+	if err != nil {
+		return -1, -1, err
+	}
+
+	if len(refs) == 0 {
+		return -1, -1, nil
+	}
+
+	// Start with sentinel values and walk back to the first and last (min and max)
+	var first = math.MaxInt32
+	var last = -1
+	for _, segmentReg := range refs {
+		if segmentReg < first {
+			first = segmentReg
+		}
+		if segmentReg > last {
+			last = segmentReg
+		}
+	}
+	return first, last, nil
+}
+
+// NotifyWrite allows the Watcher to subscribe to write events published by the Writer. When a write event is received,
+// we emit the signal to trigger a segment read on the watcher main routine. If the readNotify channel is not being
+// listened on, that means the main routine is busy processing a segment, or waiting because an unhandled error occurred.
+// In that case we drop the signal and make the Watcher wait for the next one.
+func (w *Watcher) NotifyWrite() {
+	select {
+	case w.readNotify <- struct{}{}:
+		// notification sent through the channel
+		return
+	default:
+		// drop the wal-written signal if the channel is not being listened on
+		w.metrics.droppedWriteNotifications.WithLabelValues(w.id).Inc()
+	}
+}
+
+// isClosed checks in a non-blocking manner if a channel is closed or not.
+func isClosed(c chan struct{}) bool {
+	select {
+	case <-c:
+		return true
+	default:
+		return false
+	}
+}
+
+// readSegmentNumbers reads the given directory and returns all segment identifiers, that is, the index of each segment
+// file.
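// For example (illustrative): a WAL directory holding the files "00000000",
// "00000001" and "checkpoint.00000000" yields segment numbers [0, 1]; the
// checkpoint name fails strconv.Atoi and is skipped:
//
//	refs, err := readSegmentNumbers(walDir) // -> []int{0, 1}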
+func readSegmentNumbers(dir string) ([]int, error) {
+	files, err := os.ReadDir(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	var refs []int
+	for _, f := range files {
+		k, err := strconv.Atoi(f.Name())
+		if err != nil {
+			continue
+		}
+		refs = append(refs, k)
+	}
+	return refs, nil
+}
diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/watcher_metrics.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/watcher_metrics.go
new file mode 100644
index 00000000..7fd15c7a
--- /dev/null
+++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/watcher_metrics.go
@@ -0,0 +1,118 @@
+package wal
+
+import (
+	"errors"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type WatcherMetrics struct {
+	recordsRead               *prometheus.CounterVec
+	recordDecodeFails         *prometheus.CounterVec
+	droppedWriteNotifications *prometheus.CounterVec
+	segmentRead               *prometheus.CounterVec
+	currentSegment            *prometheus.GaugeVec
+	watchersRunning           *prometheus.GaugeVec
+}
+
+func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
+	m := &WatcherMetrics{
+		recordsRead: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: "loki",
+				Subsystem: "wal_watcher",
+				Name:      "records_read_total",
+				Help:      "Number of records read by the WAL watcher from the WAL.",
+			},
+			[]string{"id"},
+		),
+		recordDecodeFails: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: "loki",
+				Subsystem: "wal_watcher",
+				Name:      "record_decode_failures_total",
+				Help:      "Number of records read by the WAL watcher that resulted in an error when decoding.",
+			},
+			[]string{"id"},
+		),
+		droppedWriteNotifications: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: "loki",
+				Subsystem: "wal_watcher",
+				Name:      "dropped_write_notifications_total",
+				Help:      "Number of dropped write notifications due to having one already buffered.",
+			},
+			[]string{"id"},
+		),
+		segmentRead: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: "loki",
+				Subsystem: "wal_watcher",
+				Name:      "segment_read_total",
+				Help:      "Number of segment reads triggered by the backup timer firing.",
+			},
+			[]string{"id", "reason"},
+		),
+		currentSegment: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: "loki",
+				Subsystem: "wal_watcher",
+				Name:      "current_segment",
+				Help:      "Current segment the WAL watcher is reading records from.",
+			},
+			[]string{"id"},
+		),
+		watchersRunning: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: "loki",
+				Subsystem: "wal_watcher",
+				Name:      "running",
+				Help:      "Number of WAL watchers running.",
+			},
+			nil,
+		),
+	}
+
+	// Collectors will be re-registered to the registry if it gets reloaded.
+	// Reuse the old collectors instead of panicking out.
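	// To illustrate the tolerance (a sketch, not part of this diff): calling
	// NewWatcherMetrics twice against the same registry must not panic; the
	// second call simply adopts the collectors registered by the first:
	//
	//	reg := prometheus.NewRegistry()
	//	m1 := NewWatcherMetrics(reg)
	//	m2 := NewWatcherMetrics(reg) // reuses m1's collectors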
+	if reg != nil {
+		if err := reg.Register(m.recordsRead); err != nil {
+			are := &prometheus.AlreadyRegisteredError{}
+			if errors.As(err, are) {
+				m.recordsRead = are.ExistingCollector.(*prometheus.CounterVec)
+			}
+		}
+		if err := reg.Register(m.recordDecodeFails); err != nil {
+			are := &prometheus.AlreadyRegisteredError{}
+			if errors.As(err, are) {
+				m.recordDecodeFails = are.ExistingCollector.(*prometheus.CounterVec)
+			}
+		}
+		if err := reg.Register(m.droppedWriteNotifications); err != nil {
+			are := &prometheus.AlreadyRegisteredError{}
+			if errors.As(err, are) {
+				m.droppedWriteNotifications = are.ExistingCollector.(*prometheus.CounterVec)
+			}
+		}
+		if err := reg.Register(m.segmentRead); err != nil {
+			are := &prometheus.AlreadyRegisteredError{}
+			if errors.As(err, are) {
+				m.segmentRead = are.ExistingCollector.(*prometheus.CounterVec)
+			}
+		}
+		if err := reg.Register(m.currentSegment); err != nil {
+			are := &prometheus.AlreadyRegisteredError{}
+			if errors.As(err, are) {
+				m.currentSegment = are.ExistingCollector.(*prometheus.GaugeVec)
+			}
+		}
+		if err := reg.Register(m.watchersRunning); err != nil {
+			are := &prometheus.AlreadyRegisteredError{}
+			if errors.As(err, are) {
+				m.watchersRunning = are.ExistingCollector.(*prometheus.GaugeVec)
+			}
+		}
+	}
+
+	return m
+}
diff --git a/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/writer.go b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/writer.go
index b5eaa41c..8e754a01 100644
--- a/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/writer.go
+++ b/vendor/github.com/grafana/loki/clients/pkg/promtail/wal/writer.go
@@ -27,6 +27,19 @@ const (
 	minimumCleanSegmentsEvery = time.Second
 )

+// CleanupEventSubscriber is an interface that objects that want to receive events from the wal Writer can implement. They
+// can then subscribe to events by adding themselves as subscribers on the Writer with writer.SubscribeCleanup.
+type CleanupEventSubscriber interface {
+	WriteCleanup
+}
+
+// WriteEventSubscriber is an interface that objects that want to receive an event when Writer writes to the WAL can
+// implement, and later subscribe to the Writer via writer.SubscribeWrite.
+type WriteEventSubscriber interface {
+	// NotifyWrite allows others to be notified when Writer writes to the underlying WAL.
+	NotifyWrite()
+}
+
 // Writer implements api.EntryHandler, exposing a channel were scraping targets can write to. Reading from there, it
 // writes incoming entries to a WAL.
// Also, since Writer is responsible for all changing operations over the WAL, therefore a routine is run for cleaning @@ -39,6 +52,12 @@ type Writer struct { wal WAL entryWriter *entryWriter + cleanupSubscribersLock sync.RWMutex + cleanupSubscribers []CleanupEventSubscriber + + writeSubscribersLock sync.RWMutex + writeSubscribers []WriteEventSubscriber + reclaimedOldSegmentsSpaceCounter *prometheus.CounterVec closeCleaner chan struct{} @@ -85,7 +104,17 @@ func (wrt *Writer) start(maxSegmentAge time.Duration) { go func() { defer wrt.wg.Done() for e := range wrt.entries { - wrt.entryWriter.WriteEntry(e, wrt.wal, wrt.log) + if err := wrt.entryWriter.WriteEntry(e, wrt.wal, wrt.log); err != nil { + level.Error(wrt.log).Log("msg", "failed to write entry", "err", err) + // if an error occurred while writing the wal, go to next entry and don't notify write subscribers + continue + } + + wrt.writeSubscribersLock.RLock() + for _, s := range wrt.writeSubscribers { + s.NotifyWrite() + } + wrt.writeSubscribersLock.RUnlock() } }() // WAL cleanup routine that cleans old segments @@ -148,6 +177,7 @@ func (wrt *Writer) cleanSegments(maxAge time.Duration) error { } // find the most recent, or head segment to avoid cleaning it up lastSegment := -1 + maxReclaimed := -1 for _, segment := range segments { if lastSegment < segment.number { lastSegment = segment.number @@ -161,11 +191,37 @@ func (wrt *Writer) cleanSegments(maxAge time.Duration) error { } level.Debug(wrt.log).Log("msg", "Deleted old wal segment", "segmentNum", segment.number) wrt.reclaimedOldSegmentsSpaceCounter.WithLabelValues().Add(float64(segment.size)) + // keep track of the largest segment number reclaimed + if segment.number > maxReclaimed { + maxReclaimed = segment.number + } + } + } + // if we reclaimed at least one segment, notify all subscribers + if maxReclaimed != -1 { + wrt.cleanupSubscribersLock.RLock() + defer wrt.cleanupSubscribersLock.RUnlock() + for _, subscriber := range wrt.cleanupSubscribers { + subscriber.SeriesReset(maxReclaimed) } } return nil } +// SubscribeCleanup adds a new CleanupEventSubscriber that will receive cleanup events. +func (wrt *Writer) SubscribeCleanup(subscriber CleanupEventSubscriber) { + wrt.cleanupSubscribersLock.Lock() + defer wrt.cleanupSubscribersLock.Unlock() + wrt.cleanupSubscribers = append(wrt.cleanupSubscribers, subscriber) +} + +// SubscribeWrite adds a new WriteEventSubscriber that will receive write events. +func (wrt *Writer) SubscribeWrite(subscriber WriteEventSubscriber) { + wrt.writeSubscribersLock.Lock() + defer wrt.writeSubscribersLock.Unlock() + wrt.writeSubscribers = append(wrt.writeSubscribers, subscriber) +} + // entryWriter writes api.Entry to a WAL, keeping in memory a single Record object that's reused // across every write. type entryWriter struct { @@ -184,18 +240,11 @@ func newEntryWriter() *entryWriter { // WriteEntry writes an api.Entry to a WAL. Note that since it's re-using the same Record object for every // write, it first has to be reset, and then overwritten accordingly. Therefore, WriteEntry is not thread-safe. 
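// A sketch of that guarantee in practice (illustrative; `entries` and `w` are
// placeholders): the Writer satisfies the single-writer requirement by draining
// its entries channel from one goroutine (see start() above), so no locking is
// needed around the shared Record:
//
//	ew := newEntryWriter()
//	go func() {
//		for e := range entries {
//			_ = ew.WriteEntry(e, w, nil) // sole caller; Record reuse is safe
//		}
//	}()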
-func (ew *entryWriter) WriteEntry(entry api.Entry, wl WAL, logger log.Logger) { +func (ew *entryWriter) WriteEntry(entry api.Entry, wl WAL, _ log.Logger) error { // Reset wal record slices ew.reusableWALRecord.RefEntries = ew.reusableWALRecord.RefEntries[:0] ew.reusableWALRecord.Series = ew.reusableWALRecord.Series[:0] - defer func() { - err := wl.Log(ew.reusableWALRecord) - if err != nil { - level.Error(logger).Log("msg", "Failed to write entry to wal", "err", err) - } - }() - var fp uint64 lbs := labels.FromMap(util.ModelLabelSetToMap(entry.Labels)) sort.Sort(lbs) @@ -212,6 +261,8 @@ func (ew *entryWriter) WriteEntry(entry api.Entry, wl WAL, logger log.Logger) { Ref: chunks.HeadSeriesRef(fp), Labels: lbs, }) + + return wl.Log(ew.reusableWALRecord) } type segmentRef struct { diff --git a/vendor/github.com/grafana/loki/pkg/ingester/wal/encoding.go b/vendor/github.com/grafana/loki/pkg/ingester/wal/encoding.go index 7c5411d7..fee0b8b5 100644 --- a/vendor/github.com/grafana/loki/pkg/ingester/wal/encoding.go +++ b/vendor/github.com/grafana/loki/pkg/ingester/wal/encoding.go @@ -25,11 +25,14 @@ const ( // WALRecordEntriesV2 is the type for the WAL record for samples with an // additional counter value for use in replaying without the ordering constraint. WALRecordEntriesV2 + // WALRecordEntriesV3 is the type for the WAL record for samples with structured metadata. + WALRecordEntriesV3 ) // The current type of Entries that this distribution writes. // Loki can read in a backwards compatible manner, but will write the newest variant. -const CurrentEntriesRec = WALRecordEntriesV2 +// TODO: Change to WALRecordEntriesV3? +const CurrentEntriesRec = WALRecordEntriesV3 // Record is a struct combining the series and samples record. type Record struct { @@ -128,6 +131,17 @@ outer: buf.PutVarint64(s.Timestamp.UnixNano() - first) buf.PutUvarint(len(s.Line)) buf.PutString(s.Line) + + if version >= WALRecordEntriesV3 { + // structured metadata + buf.PutUvarint(len(s.StructuredMetadata)) + for _, l := range s.StructuredMetadata { + buf.PutUvarint(len(l.Name)) + buf.PutString(l.Name) + buf.PutUvarint(len(l.Value)) + buf.PutString(l.Value) + } + } } } return buf.Get() @@ -158,9 +172,28 @@ func DecodeEntries(b []byte, version RecordType, rec *Record) error { lineLength := dec.Uvarint() line := dec.Bytes(lineLength) + var structuredMetadata []logproto.LabelAdapter + if version >= WALRecordEntriesV3 { + nStructuredMetadata := dec.Uvarint() + if nStructuredMetadata > 0 { + structuredMetadata = make([]logproto.LabelAdapter, 0, nStructuredMetadata) + for i := 0; dec.Err() == nil && i < nStructuredMetadata; i++ { + nameLength := dec.Uvarint() + name := dec.Bytes(nameLength) + valueLength := dec.Uvarint() + value := dec.Bytes(valueLength) + structuredMetadata = append(structuredMetadata, logproto.LabelAdapter{ + Name: string(name), + Value: string(value), + }) + } + } + } + refEntries.Entries = append(refEntries.Entries, logproto.Entry{ - Timestamp: time.Unix(0, baseTime+timeOffset), - Line: string(line), + Timestamp: time.Unix(0, baseTime+timeOffset), + Line: string(line), + StructuredMetadata: structuredMetadata, }) } @@ -195,7 +228,7 @@ func DecodeRecord(b []byte, walRec *Record) (err error) { case WALRecordSeries: userID = decbuf.UvarintStr() rSeries, err = dec.Series(decbuf.B, walRec.Series) - case WALRecordEntriesV1, WALRecordEntriesV2: + case WALRecordEntriesV1, WALRecordEntriesV2, WALRecordEntriesV3: userID = decbuf.UvarintStr() err = DecodeEntries(decbuf.B, t, walRec) default: diff --git 
a/vendor/github.com/grafana/loki/pkg/ingester/wal/recordpool.go b/vendor/github.com/grafana/loki/pkg/ingester/wal/recordpool.go index cc22b3f8..85bd09a1 100644 --- a/vendor/github.com/grafana/loki/pkg/ingester/wal/recordpool.go +++ b/vendor/github.com/grafana/loki/pkg/ingester/wal/recordpool.go @@ -2,13 +2,10 @@ package wal import ( "sync" - - "github.com/grafana/loki/pkg/logproto" ) type ResettingPool struct { rPool *sync.Pool // records - ePool *sync.Pool // entries bPool *sync.Pool // bytes } @@ -19,15 +16,11 @@ func NewRecordPool() *ResettingPool { return &Record{} }, }, - ePool: &sync.Pool{ - New: func() interface{} { - return make([]logproto.Entry, 0, 512) - }, - }, bPool: &sync.Pool{ New: func() interface{} { - buf := make([]byte, 0, 1<<10) // 1kb - return &buf + buf := new([]byte) // Attempt to force allocation on heap. + *buf = make([]byte, 0, 1<<10) // 1kb + return buf }, }, } @@ -36,9 +29,6 @@ func NewRecordPool() *ResettingPool { func (p *ResettingPool) GetRecord() *Record { rec := p.rPool.Get().(*Record) rec.Reset() - for _, ref := range rec.RefEntries { - p.PutEntries(ref.Entries) - } return rec } @@ -46,14 +36,6 @@ func (p *ResettingPool) PutRecord(r *Record) { p.rPool.Put(r) } -func (p *ResettingPool) GetEntries() []logproto.Entry { - return p.ePool.Get().([]logproto.Entry) -} - -func (p *ResettingPool) PutEntries(es []logproto.Entry) { - p.ePool.Put(es[:0]) // nolint:staticcheck -} - func (p *ResettingPool) GetBytes() *[]byte { return p.bPool.Get().(*[]byte) } diff --git a/vendor/github.com/grafana/loki/pkg/logproto/alias.go b/vendor/github.com/grafana/loki/pkg/logproto/alias.go index 27b984bc..ab378fcb 100644 --- a/vendor/github.com/grafana/loki/pkg/logproto/alias.go +++ b/vendor/github.com/grafana/loki/pkg/logproto/alias.go @@ -10,6 +10,7 @@ import ( type Entry = push.Entry type Stream = push.Stream +type LabelAdapter = push.LabelAdapter type PushRequest = push.PushRequest type PushResponse = push.PushResponse type PusherClient = push.PusherClient diff --git a/vendor/github.com/grafana/loki/pkg/logproto/bloomgateway.pb.go b/vendor/github.com/grafana/loki/pkg/logproto/bloomgateway.pb.go new file mode 100644 index 00000000..1c7fb9fc --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logproto/bloomgateway.pb.go @@ -0,0 +1,1519 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: pkg/logproto/bloomgateway.proto + +package logproto + +import ( + context "context" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_prometheus_common_model "github.com/prometheus/common/model" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FilterChunkRefRequest struct { + From github_com_prometheus_common_model.Time `protobuf:"varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time" json:"from"` + Through github_com_prometheus_common_model.Time `protobuf:"varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time" json:"through"` + Refs []*GroupedChunkRefs `protobuf:"bytes,3,rep,name=refs,proto3" json:"refs,omitempty"` + Filters []*LineFilterExpression `protobuf:"bytes,4,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (m *FilterChunkRefRequest) Reset() { *m = FilterChunkRefRequest{} } +func (*FilterChunkRefRequest) ProtoMessage() {} +func (*FilterChunkRefRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a50b5dd1dbcd1415, []int{0} +} +func (m *FilterChunkRefRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FilterChunkRefRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FilterChunkRefRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FilterChunkRefRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FilterChunkRefRequest.Merge(m, src) +} +func (m *FilterChunkRefRequest) XXX_Size() int { + return m.Size() +} +func (m *FilterChunkRefRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FilterChunkRefRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FilterChunkRefRequest proto.InternalMessageInfo + +func (m *FilterChunkRefRequest) GetRefs() []*GroupedChunkRefs { + if m != nil { + return m.Refs + } + return nil +} + +func (m *FilterChunkRefRequest) GetFilters() []*LineFilterExpression { + if m != nil { + return m.Filters + } + return nil +} + +type FilterChunkRefResponse struct { + ChunkRefs []*GroupedChunkRefs `protobuf:"bytes,1,rep,name=chunkRefs,proto3" json:"chunkRefs,omitempty"` +} + +func (m *FilterChunkRefResponse) Reset() { *m = FilterChunkRefResponse{} } +func (*FilterChunkRefResponse) ProtoMessage() {} +func (*FilterChunkRefResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a50b5dd1dbcd1415, []int{1} +} +func (m *FilterChunkRefResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FilterChunkRefResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FilterChunkRefResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FilterChunkRefResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FilterChunkRefResponse.Merge(m, src) +} +func (m *FilterChunkRefResponse) XXX_Size() int { + return m.Size() +} +func (m *FilterChunkRefResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FilterChunkRefResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_FilterChunkRefResponse proto.InternalMessageInfo + +func (m *FilterChunkRefResponse) GetChunkRefs() []*GroupedChunkRefs { + if m != nil { + return m.ChunkRefs + } + return nil +} + +type ShortRef struct { + From github_com_prometheus_common_model.Time `protobuf:"varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time" json:"from"` + Through github_com_prometheus_common_model.Time 
`protobuf:"varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time" json:"through"` + Checksum uint32 `protobuf:"varint,3,opt,name=checksum,proto3" json:"checksum,omitempty"` +} + +func (m *ShortRef) Reset() { *m = ShortRef{} } +func (*ShortRef) ProtoMessage() {} +func (*ShortRef) Descriptor() ([]byte, []int) { + return fileDescriptor_a50b5dd1dbcd1415, []int{2} +} +func (m *ShortRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ShortRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ShortRef.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ShortRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShortRef.Merge(m, src) +} +func (m *ShortRef) XXX_Size() int { + return m.Size() +} +func (m *ShortRef) XXX_DiscardUnknown() { + xxx_messageInfo_ShortRef.DiscardUnknown(m) +} + +var xxx_messageInfo_ShortRef proto.InternalMessageInfo + +func (m *ShortRef) GetChecksum() uint32 { + if m != nil { + return m.Checksum + } + return 0 +} + +type GroupedChunkRefs struct { + Fingerprint uint64 `protobuf:"varint,1,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"` + Tenant string `protobuf:"bytes,2,opt,name=tenant,proto3" json:"tenant,omitempty"` + Refs []*ShortRef `protobuf:"bytes,3,rep,name=refs,proto3" json:"refs,omitempty"` +} + +func (m *GroupedChunkRefs) Reset() { *m = GroupedChunkRefs{} } +func (*GroupedChunkRefs) ProtoMessage() {} +func (*GroupedChunkRefs) Descriptor() ([]byte, []int) { + return fileDescriptor_a50b5dd1dbcd1415, []int{3} +} +func (m *GroupedChunkRefs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupedChunkRefs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GroupedChunkRefs.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GroupedChunkRefs) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupedChunkRefs.Merge(m, src) +} +func (m *GroupedChunkRefs) XXX_Size() int { + return m.Size() +} +func (m *GroupedChunkRefs) XXX_DiscardUnknown() { + xxx_messageInfo_GroupedChunkRefs.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupedChunkRefs proto.InternalMessageInfo + +func (m *GroupedChunkRefs) GetFingerprint() uint64 { + if m != nil { + return m.Fingerprint + } + return 0 +} + +func (m *GroupedChunkRefs) GetTenant() string { + if m != nil { + return m.Tenant + } + return "" +} + +func (m *GroupedChunkRefs) GetRefs() []*ShortRef { + if m != nil { + return m.Refs + } + return nil +} + +func init() { + proto.RegisterType((*FilterChunkRefRequest)(nil), "logproto.FilterChunkRefRequest") + proto.RegisterType((*FilterChunkRefResponse)(nil), "logproto.FilterChunkRefResponse") + proto.RegisterType((*ShortRef)(nil), "logproto.ShortRef") + proto.RegisterType((*GroupedChunkRefs)(nil), "logproto.GroupedChunkRefs") +} + +func init() { proto.RegisterFile("pkg/logproto/bloomgateway.proto", fileDescriptor_a50b5dd1dbcd1415) } + +var fileDescriptor_a50b5dd1dbcd1415 = []byte{ + // 467 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x53, 0xcb, 0x6e, 0xd3, 0x40, + 0x14, 0xf5, 0x34, 0x51, 0x9b, 0x4e, 0x41, 0xa0, 0x11, 0x54, 0x56, 0x90, 0x26, 0x96, 0x85, 0x20, + 0x2b, 0x5b, 0x2a, 0x9b, 0xae, 0x53, 0x41, 0x85, 0xc4, 
0x6a, 0x40, 0x2c, 0xba, 0x73, 0xdc, 0xeb, + 0x87, 0xe2, 0x99, 0x6b, 0x66, 0xc6, 0x02, 0x76, 0x7c, 0x02, 0xfc, 0x05, 0x5f, 0xc0, 0x37, 0x74, + 0x99, 0x65, 0xc5, 0xa2, 0x22, 0xce, 0x86, 0x65, 0x3f, 0x01, 0xd5, 0xc6, 0x4d, 0x52, 0x81, 0x90, + 0x58, 0x75, 0xe5, 0x99, 0xb9, 0xe7, 0x8c, 0xef, 0x39, 0xe7, 0x0e, 0x1d, 0x95, 0xb3, 0x34, 0x2c, + 0x30, 0x2d, 0x35, 0x5a, 0x0c, 0xa7, 0x05, 0xa2, 0x4c, 0x23, 0x0b, 0xef, 0xa3, 0x8f, 0x41, 0x73, + 0xc4, 0x06, 0x5d, 0x71, 0xf8, 0x20, 0xc5, 0x14, 0x5b, 0xdc, 0xd5, 0xaa, 0xad, 0x0f, 0x1f, 0x6d, + 0x5c, 0xd0, 0x2d, 0xda, 0xa2, 0xff, 0x65, 0x8b, 0x3e, 0x7c, 0x91, 0x17, 0x16, 0xf4, 0x51, 0x56, + 0xa9, 0x99, 0x80, 0x44, 0xc0, 0xbb, 0x0a, 0x8c, 0x65, 0x47, 0xb4, 0x9f, 0x68, 0x94, 0x2e, 0xf1, + 0xc8, 0xb8, 0x37, 0x09, 0xcf, 0x2e, 0x46, 0xce, 0xf7, 0x8b, 0xd1, 0xd3, 0x34, 0xb7, 0x59, 0x35, + 0x0d, 0x62, 0x94, 0x61, 0xa9, 0x51, 0x82, 0xcd, 0xa0, 0x32, 0x61, 0x8c, 0x52, 0xa2, 0x0a, 0x25, + 0x9e, 0x42, 0x11, 0xbc, 0xc9, 0x25, 0x88, 0x86, 0xcc, 0x5e, 0xd2, 0x1d, 0x9b, 0x69, 0xac, 0xd2, + 0xcc, 0xdd, 0xfa, 0xbf, 0x7b, 0x3a, 0x3e, 0x0b, 0x68, 0x5f, 0x43, 0x62, 0xdc, 0x9e, 0xd7, 0x1b, + 0xef, 0x1d, 0x0c, 0x83, 0x6b, 0x21, 0xc7, 0x1a, 0xab, 0x12, 0x4e, 0xbb, 0xfe, 0x8d, 0x68, 0x70, + 0xec, 0x90, 0xee, 0x24, 0x8d, 0x30, 0xe3, 0xf6, 0x1b, 0x0a, 0x5f, 0x51, 0x5e, 0xe5, 0x0a, 0x5a, + 0xd5, 0xcf, 0x3f, 0x94, 0x1a, 0x8c, 0xc9, 0x51, 0x89, 0x0e, 0xee, 0x0b, 0xba, 0x7f, 0xd3, 0x12, + 0x53, 0xa2, 0x32, 0xc0, 0x0e, 0xe9, 0x6e, 0xdc, 0xfd, 0xc6, 0x25, 0xff, 0x6c, 0x64, 0x05, 0xf6, + 0xbf, 0x11, 0x3a, 0x78, 0x9d, 0xa1, 0xb6, 0x02, 0x92, 0x5b, 0x67, 0xed, 0x90, 0x0e, 0xe2, 0x0c, + 0xe2, 0x99, 0xa9, 0xa4, 0xdb, 0xf3, 0xc8, 0xf8, 0xae, 0xb8, 0xde, 0xfb, 0x96, 0xde, 0xbf, 0xa9, + 0x8b, 0x79, 0x74, 0x2f, 0xc9, 0x55, 0x0a, 0xba, 0xd4, 0xb9, 0xb2, 0x8d, 0x8c, 0xbe, 0x58, 0x3f, + 0x62, 0xfb, 0x74, 0xdb, 0x82, 0x8a, 0x94, 0x6d, 0x7a, 0xdb, 0x15, 0xbf, 0x77, 0xec, 0xc9, 0x46, + 0x88, 0x6c, 0xe5, 0x5d, 0xe7, 0x4d, 0x1b, 0xde, 0x41, 0x42, 0xef, 0x4c, 0xae, 0x26, 0xfd, 0xb8, + 0x9d, 0x74, 0xf6, 0x96, 0xde, 0xdb, 0x8c, 0xc4, 0xb0, 0xd1, 0x8a, 0xfc, 0xc7, 0x01, 0x1e, 0x7a, + 0x7f, 0x07, 0xb4, 0x71, 0xfa, 0xce, 0xe4, 0x64, 0xbe, 0xe0, 0xce, 0xf9, 0x82, 0x3b, 0x97, 0x0b, + 0x4e, 0x3e, 0xd5, 0x9c, 0x7c, 0xad, 0x39, 0x39, 0xab, 0x39, 0x99, 0xd7, 0x9c, 0xfc, 0xa8, 0x39, + 0xf9, 0x59, 0x73, 0xe7, 0xb2, 0xe6, 0xe4, 0xf3, 0x92, 0x3b, 0xf3, 0x25, 0x77, 0xce, 0x97, 0xdc, + 0x39, 0x79, 0xbc, 0xe6, 0x70, 0xaa, 0xa3, 0x24, 0x52, 0x51, 0x58, 0xe0, 0x2c, 0x0f, 0xd7, 0x5f, + 0xda, 0x74, 0xbb, 0xf9, 0x3c, 0xfb, 0x15, 0x00, 0x00, 0xff, 0xff, 0x55, 0xf6, 0x2b, 0x1c, 0xc1, + 0x03, 0x00, 0x00, +} + +func (this *FilterChunkRefRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FilterChunkRefRequest) + if !ok { + that2, ok := that.(FilterChunkRefRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.From.Equal(that1.From) { + return false + } + if !this.Through.Equal(that1.Through) { + return false + } + if len(this.Refs) != len(that1.Refs) { + return false + } + for i := range this.Refs { + if !this.Refs[i].Equal(that1.Refs[i]) { + return false + } + } + if len(this.Filters) != len(that1.Filters) { + return false + } + for i := range this.Filters { + if !this.Filters[i].Equal(that1.Filters[i]) { + return false + } + } + return true +} +func (this *FilterChunkRefResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, 
ok := that.(*FilterChunkRefResponse) + if !ok { + that2, ok := that.(FilterChunkRefResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.ChunkRefs) != len(that1.ChunkRefs) { + return false + } + for i := range this.ChunkRefs { + if !this.ChunkRefs[i].Equal(that1.ChunkRefs[i]) { + return false + } + } + return true +} +func (this *ShortRef) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ShortRef) + if !ok { + that2, ok := that.(ShortRef) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.From.Equal(that1.From) { + return false + } + if !this.Through.Equal(that1.Through) { + return false + } + if this.Checksum != that1.Checksum { + return false + } + return true +} +func (this *GroupedChunkRefs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GroupedChunkRefs) + if !ok { + that2, ok := that.(GroupedChunkRefs) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Fingerprint != that1.Fingerprint { + return false + } + if this.Tenant != that1.Tenant { + return false + } + if len(this.Refs) != len(that1.Refs) { + return false + } + for i := range this.Refs { + if !this.Refs[i].Equal(that1.Refs[i]) { + return false + } + } + return true +} +func (this *FilterChunkRefRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&logproto.FilterChunkRefRequest{") + s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n") + s = append(s, "Through: "+fmt.Sprintf("%#v", this.Through)+",\n") + if this.Refs != nil { + s = append(s, "Refs: "+fmt.Sprintf("%#v", this.Refs)+",\n") + } + if this.Filters != nil { + s = append(s, "Filters: "+fmt.Sprintf("%#v", this.Filters)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FilterChunkRefResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.FilterChunkRefResponse{") + if this.ChunkRefs != nil { + s = append(s, "ChunkRefs: "+fmt.Sprintf("%#v", this.ChunkRefs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ShortRef) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&logproto.ShortRef{") + s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n") + s = append(s, "Through: "+fmt.Sprintf("%#v", this.Through)+",\n") + s = append(s, "Checksum: "+fmt.Sprintf("%#v", this.Checksum)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GroupedChunkRefs) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&logproto.GroupedChunkRefs{") + s = append(s, "Fingerprint: "+fmt.Sprintf("%#v", this.Fingerprint)+",\n") + s = append(s, "Tenant: "+fmt.Sprintf("%#v", this.Tenant)+",\n") + if this.Refs != nil { + s = append(s, "Refs: "+fmt.Sprintf("%#v", this.Refs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringBloomgateway(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, 
typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// BloomGatewayClient is the client API for BloomGateway service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BloomGatewayClient interface { + FilterChunkRefs(ctx context.Context, in *FilterChunkRefRequest, opts ...grpc.CallOption) (*FilterChunkRefResponse, error) +} + +type bloomGatewayClient struct { + cc *grpc.ClientConn +} + +func NewBloomGatewayClient(cc *grpc.ClientConn) BloomGatewayClient { + return &bloomGatewayClient{cc} +} + +func (c *bloomGatewayClient) FilterChunkRefs(ctx context.Context, in *FilterChunkRefRequest, opts ...grpc.CallOption) (*FilterChunkRefResponse, error) { + out := new(FilterChunkRefResponse) + err := c.cc.Invoke(ctx, "/logproto.BloomGateway/FilterChunkRefs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BloomGatewayServer is the server API for BloomGateway service. +type BloomGatewayServer interface { + FilterChunkRefs(context.Context, *FilterChunkRefRequest) (*FilterChunkRefResponse, error) +} + +// UnimplementedBloomGatewayServer can be embedded to have forward compatible implementations. +type UnimplementedBloomGatewayServer struct { +} + +func (*UnimplementedBloomGatewayServer) FilterChunkRefs(ctx context.Context, req *FilterChunkRefRequest) (*FilterChunkRefResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FilterChunkRefs not implemented") +} + +func RegisterBloomGatewayServer(s *grpc.Server, srv BloomGatewayServer) { + s.RegisterService(&_BloomGateway_serviceDesc, srv) +} + +func _BloomGateway_FilterChunkRefs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FilterChunkRefRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BloomGatewayServer).FilterChunkRefs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/logproto.BloomGateway/FilterChunkRefs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BloomGatewayServer).FilterChunkRefs(ctx, req.(*FilterChunkRefRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BloomGateway_serviceDesc = grpc.ServiceDesc{ + ServiceName: "logproto.BloomGateway", + HandlerType: (*BloomGatewayServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "FilterChunkRefs", + Handler: _BloomGateway_FilterChunkRefs_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/logproto/bloomgateway.proto", +} + +func (m *FilterChunkRefRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterChunkRefRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FilterChunkRefRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + { + size, 
err := m.Filters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBloomgateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Refs) > 0 { + for iNdEx := len(m.Refs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Refs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBloomgateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Through != 0 { + i = encodeVarintBloomgateway(dAtA, i, uint64(m.Through)) + i-- + dAtA[i] = 0x10 + } + if m.From != 0 { + i = encodeVarintBloomgateway(dAtA, i, uint64(m.From)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FilterChunkRefResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterChunkRefResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FilterChunkRefResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChunkRefs) > 0 { + for iNdEx := len(m.ChunkRefs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ChunkRefs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBloomgateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ShortRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShortRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShortRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Checksum != 0 { + i = encodeVarintBloomgateway(dAtA, i, uint64(m.Checksum)) + i-- + dAtA[i] = 0x18 + } + if m.Through != 0 { + i = encodeVarintBloomgateway(dAtA, i, uint64(m.Through)) + i-- + dAtA[i] = 0x10 + } + if m.From != 0 { + i = encodeVarintBloomgateway(dAtA, i, uint64(m.From)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *GroupedChunkRefs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupedChunkRefs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupedChunkRefs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Refs) > 0 { + for iNdEx := len(m.Refs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Refs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBloomgateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Tenant) > 0 { + i -= len(m.Tenant) + copy(dAtA[i:], m.Tenant) + i = encodeVarintBloomgateway(dAtA, i, uint64(len(m.Tenant))) + i-- + dAtA[i] = 0x12 + } + if m.Fingerprint != 0 { + i = encodeVarintBloomgateway(dAtA, i, uint64(m.Fingerprint)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintBloomgateway(dAtA []byte, offset int, v uint64) int { + offset -= sovBloomgateway(v) + base := offset 
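+ // offset was pre-decremented by sovBloomgateway(v), reserving exactly
+ // enough room; the loop below then writes the varint forward, 7 bits
+ // per byte (least-significant group first) with the 0x80 continuation
+ // bit set on every byte except the last, which is emitted after the loop.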
+ for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *FilterChunkRefRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.From != 0 { + n += 1 + sovBloomgateway(uint64(m.From)) + } + if m.Through != 0 { + n += 1 + sovBloomgateway(uint64(m.Through)) + } + if len(m.Refs) > 0 { + for _, e := range m.Refs { + l = e.Size() + n += 1 + l + sovBloomgateway(uint64(l)) + } + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.Size() + n += 1 + l + sovBloomgateway(uint64(l)) + } + } + return n +} + +func (m *FilterChunkRefResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ChunkRefs) > 0 { + for _, e := range m.ChunkRefs { + l = e.Size() + n += 1 + l + sovBloomgateway(uint64(l)) + } + } + return n +} + +func (m *ShortRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.From != 0 { + n += 1 + sovBloomgateway(uint64(m.From)) + } + if m.Through != 0 { + n += 1 + sovBloomgateway(uint64(m.Through)) + } + if m.Checksum != 0 { + n += 1 + sovBloomgateway(uint64(m.Checksum)) + } + return n +} + +func (m *GroupedChunkRefs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Fingerprint != 0 { + n += 1 + sovBloomgateway(uint64(m.Fingerprint)) + } + l = len(m.Tenant) + if l > 0 { + n += 1 + l + sovBloomgateway(uint64(l)) + } + if len(m.Refs) > 0 { + for _, e := range m.Refs { + l = e.Size() + n += 1 + l + sovBloomgateway(uint64(l)) + } + } + return n +} + +func sovBloomgateway(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozBloomgateway(x uint64) (n int) { + return sovBloomgateway(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FilterChunkRefRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForRefs := "[]*GroupedChunkRefs{" + for _, f := range this.Refs { + repeatedStringForRefs += strings.Replace(f.String(), "GroupedChunkRefs", "GroupedChunkRefs", 1) + "," + } + repeatedStringForRefs += "}" + repeatedStringForFilters := "[]*LineFilterExpression{" + for _, f := range this.Filters { + repeatedStringForFilters += strings.Replace(fmt.Sprintf("%v", f), "LineFilterExpression", "LineFilterExpression", 1) + "," + } + repeatedStringForFilters += "}" + s := strings.Join([]string{`&FilterChunkRefRequest{`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `Through:` + fmt.Sprintf("%v", this.Through) + `,`, + `Refs:` + repeatedStringForRefs + `,`, + `Filters:` + repeatedStringForFilters + `,`, + `}`, + }, "") + return s +} +func (this *FilterChunkRefResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForChunkRefs := "[]*GroupedChunkRefs{" + for _, f := range this.ChunkRefs { + repeatedStringForChunkRefs += strings.Replace(f.String(), "GroupedChunkRefs", "GroupedChunkRefs", 1) + "," + } + repeatedStringForChunkRefs += "}" + s := strings.Join([]string{`&FilterChunkRefResponse{`, + `ChunkRefs:` + repeatedStringForChunkRefs + `,`, + `}`, + }, "") + return s +} +func (this *ShortRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ShortRef{`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `Through:` + fmt.Sprintf("%v", this.Through) + `,`, + `Checksum:` + fmt.Sprintf("%v", this.Checksum) + `,`, + `}`, + }, "") + return s +} +func (this *GroupedChunkRefs) String() string { + if this == nil { + return "nil" + } + repeatedStringForRefs := "[]*ShortRef{" + for _, f := range this.Refs { 
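+ // Each element is rendered via its generated String() method and
+ // comma-separated inside a []*ShortRef{...} literal-style string.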
+ repeatedStringForRefs += strings.Replace(f.String(), "ShortRef", "ShortRef", 1) + "," + } + repeatedStringForRefs += "}" + s := strings.Join([]string{`&GroupedChunkRefs{`, + `Fingerprint:` + fmt.Sprintf("%v", this.Fingerprint) + `,`, + `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`, + `Refs:` + repeatedStringForRefs + `,`, + `}`, + }, "") + return s +} +func valueToStringBloomgateway(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FilterChunkRefRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FilterChunkRefRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FilterChunkRefRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + m.From = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.From |= github_com_prometheus_common_model.Time(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Through", wireType) + } + m.Through = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Through |= github_com_prometheus_common_model.Time(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBloomgateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBloomgateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Refs = append(m.Refs, &GroupedChunkRefs{}) + if err := m.Refs[len(m.Refs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBloomgateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBloomgateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, &LineFilterExpression{}) + if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBloomgateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBloomgateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBloomgateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FilterChunkRefResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FilterChunkRefResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FilterChunkRefResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkRefs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBloomgateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBloomgateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChunkRefs = append(m.ChunkRefs, &GroupedChunkRefs{}) + if err := m.ChunkRefs[len(m.ChunkRefs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBloomgateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBloomgateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBloomgateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShortRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShortRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShortRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + m.From = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.From |= github_com_prometheus_common_model.Time(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Through", wireType) + } + m.Through = 0 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Through |= github_com_prometheus_common_model.Time(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType) + } + m.Checksum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Checksum |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBloomgateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBloomgateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBloomgateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupedChunkRefs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupedChunkRefs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupedChunkRefs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Fingerprint", wireType) + } + m.Fingerprint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Fingerprint |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBloomgateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBloomgateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tenant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBloomgateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBloomgateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Refs = append(m.Refs, &ShortRef{}) + if err := m.Refs[len(m.Refs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx 
= preIndex + skippy, err := skipBloomgateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBloomgateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBloomgateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBloomgateway(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBloomgateway + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthBloomgateway + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBloomgateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipBloomgateway(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthBloomgateway + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthBloomgateway = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBloomgateway = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/grafana/loki/pkg/logproto/bloomgateway.proto b/vendor/github.com/grafana/loki/pkg/logproto/bloomgateway.proto new file mode 100644 index 00000000..71e762cc --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logproto/bloomgateway.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package logproto; + +import "gogoproto/gogo.proto"; +import "pkg/logproto/logproto.proto"; + +option go_package = "github.com/grafana/loki/pkg/logproto"; + +message FilterChunkRefRequest { + int64 from = 1 [ + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; + int64 through = 2 [ + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; + repeated GroupedChunkRefs refs = 3; + repeated logproto.LineFilterExpression filters = 4; +} + +message FilterChunkRefResponse { + repeated GroupedChunkRefs chunkRefs = 1; +} + +message ShortRef { + int64 from = 1 [ + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false 
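+ // On the wire this stays a plain int64 varint; gogoproto's customtype
+ // maps it onto model.Time (milliseconds) as a value rather than a pointer.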
+ ]; + int64 through = 2 [ + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; + uint32 checksum = 3; +} + +message GroupedChunkRefs { + uint64 fingerprint = 1; + string tenant = 2; + repeated ShortRef refs = 3; +} + +service BloomGateway { + rpc FilterChunkRefs(FilterChunkRefRequest) returns (FilterChunkRefResponse) {} +} diff --git a/vendor/github.com/grafana/loki/pkg/logproto/compat.go b/vendor/github.com/grafana/loki/pkg/logproto/compat.go index b9df2861..268e588d 100644 --- a/vendor/github.com/grafana/loki/pkg/logproto/compat.go +++ b/vendor/github.com/grafana/loki/pkg/logproto/compat.go @@ -1,6 +1,7 @@ package logproto import ( + "encoding/binary" stdjson "encoding/json" "fmt" "math" @@ -10,6 +11,7 @@ import ( "time" "unsafe" + "github.com/cespare/xxhash/v2" jsoniter "github.com/json-iterator/go" "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" @@ -18,6 +20,7 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions" + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" "github.com/grafana/loki/pkg/util" ) @@ -50,47 +53,6 @@ func FromLabelAdaptersToLabels(ls []LabelAdapter) labels.Labels { return *(*labels.Labels)(unsafe.Pointer(&ls)) } -// FromLabelAdaptersToLabelsWithCopy converts []LabelAdapter to labels.Labels. -// Do NOT use unsafe to convert between data types because this function may -// get in input labels whose data structure is reused. -func FromLabelAdaptersToLabelsWithCopy(input []LabelAdapter) labels.Labels { - return CopyLabels(FromLabelAdaptersToLabels(input)) -} - -// Efficiently copies labels input slice. To be used in cases where input slice -// can be reused, but long-term copy is needed. -func CopyLabels(input []labels.Label) labels.Labels { - result := make(labels.Labels, len(input)) - - size := 0 - for _, l := range input { - size += len(l.Name) - size += len(l.Value) - } - - // Copy all strings into the buffer, and use 'yoloString' to convert buffer - // slices to strings. - buf := make([]byte, size) - - for i, l := range input { - result[i].Name, buf = copyStringToBuffer(l.Name, buf) - result[i].Value, buf = copyStringToBuffer(l.Value, buf) - } - return result -} - -// Copies string to buffer (which must be big enough), and converts buffer slice containing -// the string copy into new string. -func copyStringToBuffer(in string, buf []byte) (string, []byte) { - l := len(in) - c := copy(buf, in) - if c != l { - panic("not copied full string") - } - - return yoloString(buf[0:l]), buf[l:] -} - // FromLabelsToLabelAdapters casts labels.Labels to []LabelAdapter. // It uses unsafe, but as LabelAdapter == labels.Label this should be safe. // This allows us to use labels.Labels directly in protos. @@ -273,13 +235,13 @@ func MergeSeriesResponses(responses []*SeriesResponse) (*SeriesResponse, error) // Satisfy definitions.Request // GetStart returns the start timestamp of the request in milliseconds. -func (m *IndexStatsRequest) GetStart() int64 { - return int64(m.From) +func (m *IndexStatsRequest) GetStart() time.Time { + return time.Unix(0, m.From.UnixNano()) } // GetEnd returns the end timestamp of the request in milliseconds. -func (m *IndexStatsRequest) GetEnd() int64 { - return int64(m.Through) +func (m *IndexStatsRequest) GetEnd() time.Time { + return time.Unix(0, m.Through.UnixNano()) } // GetStep returns the step of the request in milliseconds. 
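The hunks on either side of this point replace raw millisecond int64 accessors with time.Time ones, converting through prometheus/common's model.Time, an int64 count of milliseconds since the Unix epoch. A minimal, self-contained sketch of that round-trip; the timestamp is illustrative and only model.Time's documented API is used:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// model.Time stores milliseconds since the Unix epoch.
	ts := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	from := model.TimeFromUnixNano(ts.UnixNano())

	// The two conversion styles used in this patch agree because
	// model.Time only carries millisecond resolution.
	viaNanos := time.Unix(0, from.UnixNano()) // IndexStatsRequest.GetStart/GetEnd
	viaMillis := time.UnixMilli(int64(from))  // VolumeRequest.GetStart/GetEnd

	fmt.Println(viaNanos.Equal(viaMillis)) // true
}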
@@ -294,25 +256,163 @@ func (m *IndexStatsRequest) GetQuery() string { func (m *IndexStatsRequest) GetCachingOptions() (res definitions.CachingOptions) { return } // WithStartEnd clone the current request with different start and end timestamp. -func (m *IndexStatsRequest) WithStartEnd(startTime int64, endTime int64) definitions.Request { - new := *m - new.From = model.TimeFromUnixNano(startTime * int64(time.Millisecond)) - new.Through = model.TimeFromUnixNano(endTime * int64(time.Millisecond)) - return &new +func (m *IndexStatsRequest) WithStartEnd(start, end time.Time) definitions.Request { + clone := *m + clone.From = model.TimeFromUnixNano(start.UnixNano()) + clone.Through = model.TimeFromUnixNano(end.UnixNano()) + return &clone +} + +// WithStartEndForCache implements resultscache.Request. +func (m *IndexStatsRequest) WithStartEndForCache(start, end time.Time) resultscache.Request { + return m.WithStartEnd(start, end).(resultscache.Request) } // WithQuery clone the current request with a different query. func (m *IndexStatsRequest) WithQuery(query string) definitions.Request { - new := *m - new.Matchers = query - return &new + clone := *m + clone.Matchers = query + return &clone } // LogToSpan writes information about this request to an OpenTracing span func (m *IndexStatsRequest) LogToSpan(sp opentracing.Span) { sp.LogFields( otlog.String("query", m.GetQuery()), - otlog.String("start", timestamp.Time(m.GetStart()).String()), - otlog.String("end", timestamp.Time(m.GetEnd()).String()), + otlog.String("start", timestamp.Time(int64(m.From)).String()), + otlog.String("end", timestamp.Time(int64(m.Through)).String()), + ) +} + +func (i *IndexStatsResponse) GetHeaders() []*definitions.PrometheusResponseHeader { + return nil +} + +// Satisfy definitions.Request for Volume + +// GetStart returns the start timestamp of the request in milliseconds. +func (m *VolumeRequest) GetStart() time.Time { + return time.UnixMilli(int64(m.From)) +} + +// GetEnd returns the end timestamp of the request in milliseconds. +func (m *VolumeRequest) GetEnd() time.Time { + return time.UnixMilli(int64(m.Through)) +} + +// GetQuery returns the query of the request. +func (m *VolumeRequest) GetQuery() string { + return m.Matchers +} + +// GetCachingOptions returns the caching options. +func (m *VolumeRequest) GetCachingOptions() (res definitions.CachingOptions) { return } + +// WithStartEnd clone the current request with different start and end timestamp. +func (m *VolumeRequest) WithStartEnd(start, end time.Time) definitions.Request { + clone := *m + clone.From = model.TimeFromUnixNano(start.UnixNano()) + clone.Through = model.TimeFromUnixNano(end.UnixNano()) + return &clone +} + +// WithStartEndForCache implements resultscache.Request. +func (m *VolumeRequest) WithStartEndForCache(start, end time.Time) resultscache.Request { + return m.WithStartEnd(start, end).(resultscache.Request) +} + +// WithQuery clone the current request with a different query. 
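+// As with WithStartEnd above, the receiver is copied rather than mutated,
+// so the original request can continue to be shared safely.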
+func (m *VolumeRequest) WithQuery(query string) definitions.Request { + clone := *m + clone.Matchers = query + return &clone +} + +// LogToSpan writes information about this request to an OpenTracing span +func (m *VolumeRequest) LogToSpan(sp opentracing.Span) { + sp.LogFields( + otlog.String("query", m.GetQuery()), + otlog.String("start", timestamp.Time(int64(m.From)).String()), + otlog.String("end", timestamp.Time(int64(m.Through)).String()), ) } + +// Satisfy definitions.Request for FilterChunkRefRequest + +// GetStart returns the start timestamp of the request in milliseconds. +func (m *FilterChunkRefRequest) GetStart() time.Time { + return time.UnixMilli(int64(m.From)) +} + +// GetEnd returns the end timestamp of the request in milliseconds. +func (m *FilterChunkRefRequest) GetEnd() time.Time { + return time.UnixMilli(int64(m.Through)) +} + +// GetStep returns the step of the request in milliseconds. Always 0. +func (m *FilterChunkRefRequest) GetStep() int64 { + return 0 +} + +// GetQuery returns the query of the request. +// The query is the hash for the input chunks refs and the filter expressions. +func (m *FilterChunkRefRequest) GetQuery() string { + var encodeBuf []byte + var chunksHash uint64 + if len(m.Refs) > 0 { + h := xxhash.New() + for _, ref := range m.Refs { + _, _ = h.Write(binary.AppendUvarint(encodeBuf[:0], ref.Fingerprint)) + } + chunksHash = h.Sum64() + } + + // Short circuit if there are no filters. + if len(m.Filters) == 0 { + return fmt.Sprintf("%d", chunksHash) + } + + var sb strings.Builder + for i, filter := range m.Filters { + if i > 0 { + sb.WriteString(",") + } + sb.Write(fmt.Appendf(encodeBuf[:0], "%d", filter.Operator)) + sb.WriteString("-") + sb.WriteString(filter.Match) + } + + return fmt.Sprintf("%d/%s", chunksHash, sb.String()) +} + +// GetCachingOptions returns the caching options. +func (m *FilterChunkRefRequest) GetCachingOptions() (res resultscache.CachingOptions) { return } + +// WithStartEndForCache implements resultscache.Request. +func (m *FilterChunkRefRequest) WithStartEndForCache(start, end time.Time) resultscache.Request { + // We Remove the chunks that are not within the given time range. + chunkRefs := make([]*GroupedChunkRefs, 0, len(m.Refs)) + for _, chunkRef := range m.Refs { + refs := make([]*ShortRef, 0, len(chunkRef.Refs)) + for _, ref := range chunkRef.Refs { + if end.Before(ref.From.Time()) || ref.Through.Time().Before(start) { + continue + } + refs = append(refs, ref) + } + if len(refs) > 0 { + chunkRefs = append(chunkRefs, &GroupedChunkRefs{ + Fingerprint: chunkRef.Fingerprint, + Tenant: chunkRef.Tenant, + Refs: refs, + }) + } + } + + clone := *m + clone.From = model.TimeFromUnixNano(start.UnixNano()) + clone.Through = model.TimeFromUnixNano(end.UnixNano()) + clone.Refs = chunkRefs + + return &clone +} diff --git a/vendor/github.com/grafana/loki/pkg/logproto/extensions.go b/vendor/github.com/grafana/loki/pkg/logproto/extensions.go index 27af9e82..ee4e9309 100644 --- a/vendor/github.com/grafana/loki/pkg/logproto/extensions.go +++ b/vendor/github.com/grafana/loki/pkg/logproto/extensions.go @@ -1,24 +1,54 @@ package logproto import ( + "sort" "strings" "sync/atomic" //lint:ignore faillint we can't use go.uber.org/atomic with a protobuf struct without wrapping it. 
+ "github.com/cespare/xxhash/v2" "github.com/dustin/go-humanize" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/grafana/loki/pkg/storage/stores/tsdb/index" + "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb/index" ) -// Note, this is not very efficient and use should be minimized as it requires label construction on each comparison -type SeriesIdentifiers []SeriesIdentifier +// This is the separator define in the Prometheus Labels.Hash function. +var seps = []byte{'\xff'} -func (ids SeriesIdentifiers) Len() int { return len(ids) } -func (ids SeriesIdentifiers) Swap(i, j int) { ids[i], ids[j] = ids[j], ids[i] } -func (ids SeriesIdentifiers) Less(i, j int) bool { - a, b := labels.FromMap(ids[i].Labels), labels.FromMap(ids[j].Labels) - return labels.Compare(a, b) <= 0 +// Hash returns hash of the labels according to Prometheus' Labels.Hash function. +// `b` and `keysForLabels` are buffers that should be reused to avoid +// allocations. +func (id SeriesIdentifier) Hash(b []byte, keysForLabels []string) (uint64, []string) { + keysForLabels = keysForLabels[:0] + for k := range id.Labels { + keysForLabels = append(keysForLabels, k) + } + sort.Strings(keysForLabels) + + // Use xxhash.Sum64(b) for fast path as it's faster. + b = b[:0] + for i, name := range keysForLabels { + value := id.Labels[name] + if len(b)+len(name)+len(value)+2 >= cap(b) { + // If labels entry is 1KB+ do not allocate whole entry. + h := xxhash.New() + _, _ = h.Write(b) + for _, name := range keysForLabels[i:] { + value := id.Labels[name] + _, _ = h.WriteString(name) + _, _ = h.Write(seps) + _, _ = h.WriteString(value) + _, _ = h.Write(seps) + } + return h.Sum64(), keysForLabels + } + + b = append(b, name...) + b = append(b, seps[0]) + b = append(b, value...) 
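+ // Close the value with the same 0xff separator so name/value boundaries
+ // stay unambiguous in the hashed byte stream.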
+ b = append(b, seps[0]) + } + return xxhash.Sum64(b), keysForLabels } type Streams []Stream @@ -37,10 +67,10 @@ func (m *IndexStatsResponse) AddStream(_ model.Fingerprint) { } // Safe for concurrent use -func (m *IndexStatsResponse) AddChunk(_ model.Fingerprint, chk index.ChunkMeta) { - atomic.AddUint64(&m.Chunks, 1) - atomic.AddUint64(&m.Bytes, uint64(chk.KB<<10)) - atomic.AddUint64(&m.Entries, uint64(chk.Entries)) +func (m *IndexStatsResponse) AddChunkStats(s index.ChunkStats) { + atomic.AddUint64(&m.Chunks, s.Chunks) + atomic.AddUint64(&m.Bytes, s.KB<<10) + atomic.AddUint64(&m.Entries, s.Entries) } func (m *IndexStatsResponse) Stats() IndexStatsResponse { diff --git a/vendor/github.com/grafana/loki/pkg/logproto/indexgateway.pb.go b/vendor/github.com/grafana/loki/pkg/logproto/indexgateway.pb.go index 17c470d7..86b2665e 100644 --- a/vendor/github.com/grafana/loki/pkg/logproto/indexgateway.pb.go +++ b/vendor/github.com/grafana/loki/pkg/logproto/indexgateway.pb.go @@ -6,7 +6,6 @@ package logproto import ( context "context" fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -28,29 +27,30 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("pkg/logproto/indexgateway.proto", fileDescriptor_d27585148d0a52c8) } var fileDescriptor_d27585148d0a52c8 = []byte{ - // 351 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x4a, 0xfb, 0x30, - 0x1c, 0xc7, 0x13, 0xfe, 0xf0, 0x47, 0xa3, 0x78, 0x08, 0xc2, 0xa4, 0xd3, 0x08, 0xe2, 0x41, 0x2f, - 0xab, 0xe8, 0x1b, 0x28, 0xac, 0x0c, 0xa6, 0xe0, 0x04, 0x0f, 0x3b, 0x88, 0xe9, 0xfc, 0x2d, 0x2b, - 0xeb, 0x9a, 0xda, 0xa6, 0xe8, 0x6e, 0x3e, 0x82, 0x8f, 0xe1, 0xa3, 0x78, 0xdc, 0x71, 0x47, 0x97, - 0x5d, 0x3c, 0xee, 0xec, 0x49, 0xda, 0xd2, 0x2d, 0x1b, 0x1d, 0x78, 0x6a, 0xfa, 0xfd, 0x7e, 0xf2, - 0xf9, 0xd1, 0x26, 0xe4, 0x30, 0xec, 0x0b, 0xdb, 0x97, 0x22, 0x8c, 0xa4, 0x92, 0xb6, 0x17, 0x3c, - 0xc1, 0xab, 0xe0, 0x0a, 0x5e, 0xf8, 0xb0, 0x96, 0x45, 0x74, 0xc7, 0xcc, 0x42, 0xd7, 0xda, 0x15, - 0x52, 0xc8, 0x9c, 0x4e, 0x57, 0x39, 0x65, 0x55, 0x97, 0x34, 0xc5, 0x22, 0x2f, 0xcf, 0x7f, 0xfe, - 0x91, 0xed, 0x46, 0x6a, 0x71, 0x72, 0x0b, 0x6d, 0x10, 0x72, 0x9b, 0x40, 0x34, 0xcc, 0x42, 0x5a, - 0xad, 0xcd, 0xf9, 0x45, 0xda, 0x82, 0xe7, 0x04, 0x62, 0x65, 0xed, 0x97, 0x97, 0x71, 0x28, 0x83, - 0x18, 0xce, 0x30, 0x6d, 0x92, 0x2d, 0x07, 0xd4, 0x55, 0x2f, 0x09, 0xfa, 0x2d, 0xe8, 0x52, 0x03, - 0x37, 0xe2, 0x42, 0x76, 0xb0, 0xa6, 0xcd, 0x6d, 0x47, 0x88, 0xd6, 0xc9, 0xa6, 0x03, 0xea, 0x0e, - 0x22, 0x0f, 0x62, 0x6a, 0x2d, 0xd1, 0x79, 0x58, 0x98, 0xaa, 0xa5, 0xdd, 0xdc, 0xf3, 0x40, 0x2a, - 0x4d, 0xee, 0x82, 0x7f, 0xc3, 0x07, 0x10, 0xd7, 0x65, 0x74, 0x0d, 0x2a, 0xf2, 0x3a, 0xe9, 0x1b, - 0x3d, 0x59, 0xec, 0x5c, 0x83, 0x14, 0x33, 0x2a, 0x2b, 0xa4, 0xe1, 0x7f, 0x24, 0x7b, 0x59, 0x74, - 0xcf, 0xfd, 0x64, 0x75, 0xc0, 0xe9, 0xca, 0xb6, 0x12, 0xe6, 0x0f, 0x13, 0x1c, 0xb2, 0x91, 0x7e, - 0x98, 0xe2, 0x2a, 0x36, 0x0f, 0x28, 0xfb, 0xfd, 0x59, 0x5a, 0x72, 0x40, 0x66, 0x59, 0x88, 0x2e, - 0xdb, 0xa3, 0x09, 0x43, 0xe3, 0x09, 0x43, 0xb3, 0x09, 0xc3, 0x6f, 0x9a, 0xe1, 0x0f, 0xcd, 0xf0, - 0xa7, 0x66, 0x78, 0xa4, 0x19, 0xfe, 0xd2, 0x0c, 0x7f, 0x6b, 0x86, 0x66, 0x9a, 0xe1, 0xf7, 0x29, - 0x43, 0xa3, 0x29, 0x43, 0xe3, 0x29, 0x43, 0xed, 0x63, 0xe1, 0xa9, 0x5e, 0xe2, 0xd6, 0x3a, 0x72, - 0x60, 0x8b, 0x88, 0x77, 0x79, 0xc0, 0x6d, 0x5f, 0xf6, 0x3d, 0xdb, 0xbc, 0x67, 0xee, 0xff, 0xec, - 
0x71, 0xf1, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x22, 0x04, 0xe7, 0xc5, 0x02, 0x00, 0x00, + // 361 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xb1, 0x4e, 0xc2, 0x40, + 0x18, 0xc7, 0xef, 0x12, 0x63, 0xf4, 0x34, 0x0e, 0xb7, 0x40, 0x40, 0xcf, 0xc4, 0x38, 0xe8, 0x42, + 0x8d, 0xbe, 0x80, 0xd1, 0x84, 0x86, 0x04, 0x4d, 0xc4, 0x84, 0x81, 0xc1, 0x78, 0xc5, 0x8f, 0xd2, + 0x50, 0x7a, 0xb5, 0xbd, 0x46, 0xd9, 0x7c, 0x04, 0x1f, 0xc3, 0x87, 0xf0, 0x01, 0x1c, 0x19, 0x19, + 0xe5, 0x58, 0x1c, 0x79, 0x04, 0xc3, 0x35, 0x85, 0x03, 0x4b, 0xe2, 0x04, 0xfd, 0xfd, 0x7f, 0xdf, + 0xff, 0x4b, 0xef, 0x4a, 0x0e, 0xc3, 0x9e, 0x6b, 0xf9, 0xc2, 0x0d, 0x23, 0x21, 0x85, 0xe5, 0x05, + 0x4f, 0xf0, 0xea, 0x72, 0x09, 0x2f, 0x7c, 0x50, 0xd1, 0x88, 0xee, 0x99, 0x2c, 0x74, 0x4a, 0xe5, + 0xa5, 0x81, 0xec, 0x4f, 0x2a, 0x9f, 0x7f, 0x6e, 0x90, 0xdd, 0xda, 0xcc, 0xb7, 0x53, 0x9f, 0xd6, + 0x08, 0xb9, 0x4b, 0x20, 0x1a, 0x68, 0x48, 0xcb, 0x95, 0xb9, 0xbf, 0xa0, 0x0d, 0x78, 0x4e, 0x20, + 0x96, 0xa5, 0xfd, 0xfc, 0x30, 0x0e, 0x45, 0x10, 0xc3, 0x19, 0xa6, 0x75, 0xb2, 0x63, 0x83, 0xbc, + 0xee, 0x26, 0x41, 0xaf, 0x01, 0x1d, 0x6a, 0xe8, 0x06, 0xce, 0xca, 0x0e, 0xd6, 0xa4, 0x69, 0xdb, + 0x11, 0xa2, 0x55, 0xb2, 0x6d, 0x83, 0xbc, 0x87, 0xc8, 0x83, 0x98, 0x96, 0x96, 0xec, 0x14, 0x66, + 0x4d, 0xe5, 0xdc, 0x6c, 0xde, 0xf3, 0x40, 0x0a, 0x75, 0xee, 0x80, 0x7f, 0xcb, 0xfb, 0x10, 0x57, + 0x45, 0x74, 0x03, 0x32, 0xf2, 0xda, 0xb3, 0x27, 0x7a, 0xb2, 0x98, 0x5c, 0xa3, 0x64, 0x3b, 0x0a, + 0x2b, 0xa6, 0xd1, 0xff, 0x48, 0x8a, 0x1a, 0x35, 0xb9, 0x9f, 0xac, 0x2e, 0x38, 0x5d, 0x19, 0xcb, + 0x71, 0xfe, 0xb1, 0xc1, 0x26, 0x5b, 0xb3, 0x17, 0x93, 0x5c, 0xc6, 0xe6, 0x05, 0xe9, 0xe3, 0xd7, + 0x34, 0xe7, 0x82, 0xcc, 0x70, 0x5e, 0x74, 0xa9, 0x8f, 0xb4, 0x29, 0xfc, 0xa4, 0x0f, 0xd4, 0x58, + 0x98, 0x92, 0xac, 0xa5, 0xf8, 0x37, 0xc8, 0x1a, 0xae, 0x5a, 0xc3, 0x31, 0x43, 0xa3, 0x31, 0x43, + 0xd3, 0x31, 0xc3, 0x6f, 0x8a, 0xe1, 0x0f, 0xc5, 0xf0, 0x97, 0x62, 0x78, 0xa8, 0x18, 0xfe, 0x56, + 0x0c, 0xff, 0x28, 0x86, 0xa6, 0x8a, 0xe1, 0xf7, 0x09, 0x43, 0xc3, 0x09, 0x43, 0xa3, 0x09, 0x43, + 0xad, 0x63, 0xd7, 0x93, 0xdd, 0xc4, 0xa9, 0xb4, 0x45, 0xdf, 0x72, 0x23, 0xde, 0xe1, 0x01, 0xb7, + 0x7c, 0xd1, 0xf3, 0x2c, 0xf3, 0x4b, 0x75, 0x36, 0xf5, 0xcf, 0xc5, 0x6f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x7a, 0x1a, 0x28, 0xb4, 0xf1, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -76,6 +76,9 @@ type IndexGatewayClient interface { // Note: this MUST be the same as the variant defined in // logproto.proto on the Querier service. GetStats(ctx context.Context, in *IndexStatsRequest, opts ...grpc.CallOption) (*IndexStatsResponse, error) + // Note: this MUST be the same as the variant defined in + // logproto.proto on the Querier service. + GetVolume(ctx context.Context, in *VolumeRequest, opts ...grpc.CallOption) (*VolumeResponse, error) } type indexGatewayClient struct { @@ -163,6 +166,15 @@ func (c *indexGatewayClient) GetStats(ctx context.Context, in *IndexStatsRequest return out, nil } +func (c *indexGatewayClient) GetVolume(ctx context.Context, in *VolumeRequest, opts ...grpc.CallOption) (*VolumeResponse, error) { + out := new(VolumeResponse) + err := c.cc.Invoke(ctx, "/indexgatewaypb.IndexGateway/GetVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // IndexGatewayServer is the server API for IndexGateway service. 
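// Implementations can embed UnimplementedIndexGatewayServer (defined further
// down in this file) to remain forward compatible as new methods such as
// GetVolume are added.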
type IndexGatewayServer interface { /// QueryIndex reads the indexes required for given query & sends back the batch of rows @@ -176,6 +188,9 @@ type IndexGatewayServer interface { // Note: this MUST be the same as the variant defined in // logproto.proto on the Querier service. GetStats(context.Context, *IndexStatsRequest) (*IndexStatsResponse, error) + // Note: this MUST be the same as the variant defined in + // logproto.proto on the Querier service. + GetVolume(context.Context, *VolumeRequest) (*VolumeResponse, error) } // UnimplementedIndexGatewayServer can be embedded to have forward compatible implementations. @@ -200,6 +215,9 @@ func (*UnimplementedIndexGatewayServer) LabelValuesForMetricName(ctx context.Con func (*UnimplementedIndexGatewayServer) GetStats(ctx context.Context, req *IndexStatsRequest) (*IndexStatsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetStats not implemented") } +func (*UnimplementedIndexGatewayServer) GetVolume(ctx context.Context, req *VolumeRequest) (*VolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetVolume not implemented") +} func RegisterIndexGatewayServer(s *grpc.Server, srv IndexGatewayServer) { s.RegisterService(&_IndexGateway_serviceDesc, srv) @@ -316,6 +334,24 @@ func _IndexGateway_GetStats_Handler(srv interface{}, ctx context.Context, dec fu return interceptor(ctx, in, info, handler) } +func _IndexGateway_GetVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexGatewayServer).GetVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/indexgatewaypb.IndexGateway/GetVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexGatewayServer).GetVolume(ctx, req.(*VolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _IndexGateway_serviceDesc = grpc.ServiceDesc{ ServiceName: "indexgatewaypb.IndexGateway", HandlerType: (*IndexGatewayServer)(nil), @@ -340,6 +376,10 @@ var _IndexGateway_serviceDesc = grpc.ServiceDesc{ MethodName: "GetStats", Handler: _IndexGateway_GetStats_Handler, }, + { + MethodName: "GetVolume", + Handler: _IndexGateway_GetVolume_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/vendor/github.com/grafana/loki/pkg/logproto/indexgateway.proto b/vendor/github.com/grafana/loki/pkg/logproto/indexgateway.proto index 6d8953cb..af34e03a 100644 --- a/vendor/github.com/grafana/loki/pkg/logproto/indexgateway.proto +++ b/vendor/github.com/grafana/loki/pkg/logproto/indexgateway.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package indexgatewaypb; -import "gogoproto/gogo.proto"; import "pkg/logproto/logproto.proto"; option go_package = "github.com/grafana/loki/pkg/logproto"; @@ -22,4 +21,8 @@ service IndexGateway { // Note: this MUST be the same as the variant defined in // logproto.proto on the Querier service. rpc GetStats(logproto.IndexStatsRequest) returns (logproto.IndexStatsResponse) {} + + // Note: this MUST be the same as the variant defined in + // logproto.proto on the Querier service. 
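+ // VolumeRequest carries label matchers plus a [from, through] range;
+ // see the Request implementation added to compat.go in this patch.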
+ rpc GetVolume(logproto.VolumeRequest) returns (logproto.VolumeResponse) {} } diff --git a/vendor/github.com/grafana/loki/pkg/logproto/logproto.pb.go b/vendor/github.com/grafana/loki/pkg/logproto/logproto.pb.go index 7aa1702e..f339745f 100644 --- a/vendor/github.com/grafana/loki/pkg/logproto/logproto.pb.go +++ b/vendor/github.com/grafana/loki/pkg/logproto/logproto.pb.go @@ -16,6 +16,7 @@ import ( stats "github.com/grafana/loki/pkg/logqlmodel/stats" _ "github.com/grafana/loki/pkg/push" github_com_grafana_loki_pkg_push "github.com/grafana/loki/pkg/push" + github_com_grafana_loki_pkg_querier_plan "github.com/grafana/loki/pkg/querier/plan" github_com_prometheus_common_model "github.com/prometheus/common/model" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -145,6 +146,7 @@ type StreamRate struct { StreamHashNoShard uint64 `protobuf:"varint,2,opt,name=streamHashNoShard,proto3" json:"streamHashNoShard,omitempty"` Rate int64 `protobuf:"varint,3,opt,name=rate,proto3" json:"rate,omitempty"` Tenant string `protobuf:"bytes,4,opt,name=tenant,proto3" json:"tenant,omitempty"` + Pushes uint32 `protobuf:"varint,5,opt,name=pushes,proto3" json:"pushes,omitempty"` } func (m *StreamRate) Reset() { *m = StreamRate{} } @@ -207,14 +209,22 @@ func (m *StreamRate) GetTenant() string { return "" } +func (m *StreamRate) GetPushes() uint32 { + if m != nil { + return m.Pushes + } + return 0 +} + type QueryRequest struct { - Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` - Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` - Start time.Time `protobuf:"bytes,3,opt,name=start,proto3,stdtime" json:"start"` - End time.Time `protobuf:"bytes,4,opt,name=end,proto3,stdtime" json:"end"` - Direction Direction `protobuf:"varint,5,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"` - Shards []string `protobuf:"bytes,7,rep,name=shards,proto3" json:"shards,omitempty"` - Deletes []*Delete `protobuf:"bytes,8,rep,name=deletes,proto3" json:"deletes,omitempty"` + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Deprecated: Do not use. + Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + Start time.Time `protobuf:"bytes,3,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,4,opt,name=end,proto3,stdtime" json:"end"` + Direction Direction `protobuf:"varint,5,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"` + Shards []string `protobuf:"bytes,7,rep,name=shards,proto3" json:"shards,omitempty"` + Deletes []*Delete `protobuf:"bytes,8,rep,name=deletes,proto3" json:"deletes,omitempty"` + Plan *github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,9,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan,omitempty"` } func (m *QueryRequest) Reset() { *m = QueryRequest{} } @@ -249,6 +259,7 @@ func (m *QueryRequest) XXX_DiscardUnknown() { var xxx_messageInfo_QueryRequest proto.InternalMessageInfo +// Deprecated: Do not use. 
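+// The query is now carried in the serialized Plan field (tag 9) added above.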
func (m *QueryRequest) GetSelector() string { if m != nil { return m.Selector @@ -299,11 +310,12 @@ func (m *QueryRequest) GetDeletes() []*Delete { } type SampleQueryRequest struct { - Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` - Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"` - End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"` - Shards []string `protobuf:"bytes,4,rep,name=shards,proto3" json:"shards,omitempty"` - Deletes []*Delete `protobuf:"bytes,5,rep,name=deletes,proto3" json:"deletes,omitempty"` + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Deprecated: Do not use. + Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"` + Shards []string `protobuf:"bytes,4,rep,name=shards,proto3" json:"shards,omitempty"` + Deletes []*Delete `protobuf:"bytes,5,rep,name=deletes,proto3" json:"deletes,omitempty"` + Plan *github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,6,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan,omitempty"` } func (m *SampleQueryRequest) Reset() { *m = SampleQueryRequest{} } @@ -338,6 +350,7 @@ func (m *SampleQueryRequest) XXX_DiscardUnknown() { var xxx_messageInfo_SampleQueryRequest proto.InternalMessageInfo +// Deprecated: Do not use. func (m *SampleQueryRequest) GetSelector() string { if m != nil { return m.Selector @@ -373,6 +386,49 @@ func (m *SampleQueryRequest) GetDeletes() []*Delete { return nil } +type Plan struct { + Raw []byte `protobuf:"bytes,1,opt,name=raw,proto3" json:"raw,omitempty"` +} + +func (m *Plan) Reset() { *m = Plan{} } +func (*Plan) ProtoMessage() {} +func (*Plan) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{5} +} +func (m *Plan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Plan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Plan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Plan) XXX_Merge(src proto.Message) { + xxx_messageInfo_Plan.Merge(m, src) +} +func (m *Plan) XXX_Size() int { + return m.Size() +} +func (m *Plan) XXX_DiscardUnknown() { + xxx_messageInfo_Plan.DiscardUnknown(m) +} + +var xxx_messageInfo_Plan proto.InternalMessageInfo + +func (m *Plan) GetRaw() []byte { + if m != nil { + return m.Raw + } + return nil +} + type Delete struct { Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` Start int64 `protobuf:"varint,2,opt,name=start,proto3" json:"start,omitempty"` @@ -382,7 +438,7 @@ type Delete struct { func (m *Delete) Reset() { *m = Delete{} } func (*Delete) ProtoMessage() {} func (*Delete) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{5} + return fileDescriptor_c28a5f14f1f4c79a, []int{6} } func (m *Delete) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,7 +496,7 @@ type QueryResponse struct { func (m *QueryResponse) Reset() { *m = QueryResponse{} } func (*QueryResponse) ProtoMessage() {} func (*QueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{6} + return fileDescriptor_c28a5f14f1f4c79a, []int{7} } func (m *QueryResponse) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -484,7 +540,7 @@ type SampleQueryResponse struct { func (m *SampleQueryResponse) Reset() { *m = SampleQueryResponse{} } func (*SampleQueryResponse) ProtoMessage() {} func (*SampleQueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{7} + return fileDescriptor_c28a5f14f1f4c79a, []int{8} } func (m *SampleQueryResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -531,7 +587,7 @@ type LabelRequest struct { func (m *LabelRequest) Reset() { *m = LabelRequest{} } func (*LabelRequest) ProtoMessage() {} func (*LabelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{8} + return fileDescriptor_c28a5f14f1f4c79a, []int{9} } func (m *LabelRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -602,7 +658,7 @@ type LabelResponse struct { func (m *LabelResponse) Reset() { *m = LabelResponse{} } func (*LabelResponse) ProtoMessage() {} func (*LabelResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{9} + return fileDescriptor_c28a5f14f1f4c79a, []int{10} } func (m *LabelResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -647,7 +703,7 @@ type Sample struct { func (m *Sample) Reset() { *m = Sample{} } func (*Sample) ProtoMessage() {} func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{10} + return fileDescriptor_c28a5f14f1f4c79a, []int{11} } func (m *Sample) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -706,7 +762,7 @@ type LegacySample struct { func (m *LegacySample) Reset() { *m = LegacySample{} } func (*LegacySample) ProtoMessage() {} func (*LegacySample) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{11} + return fileDescriptor_c28a5f14f1f4c79a, []int{12} } func (m *LegacySample) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -758,7 +814,7 @@ type Series struct { func (m *Series) Reset() { *m = Series{} } func (*Series) ProtoMessage() {} func (*Series) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{12} + return fileDescriptor_c28a5f14f1f4c79a, []int{13} } func (m *Series) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -809,16 +865,17 @@ func (m *Series) GetStreamHash() uint64 { } type TailRequest struct { - Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` - DelayFor uint32 `protobuf:"varint,3,opt,name=delayFor,proto3" json:"delayFor,omitempty"` - Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` - Start time.Time `protobuf:"bytes,5,opt,name=start,proto3,stdtime" json:"start"` + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` // Deprecated: Do not use. 
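// Editor's note (annotation, not generated code): as with QueryRequest and
// SampleQueryRequest earlier in this diff, the tail query presumably moves
// into the Plan field added to this struct below; Query appears to remain
// only for wire compatibility with older clients.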
+ DelayFor uint32 `protobuf:"varint,3,opt,name=delayFor,proto3" json:"delayFor,omitempty"` + Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` + Start time.Time `protobuf:"bytes,5,opt,name=start,proto3,stdtime" json:"start"` + Plan *github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,6,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan,omitempty"` } func (m *TailRequest) Reset() { *m = TailRequest{} } func (*TailRequest) ProtoMessage() {} func (*TailRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{13} + return fileDescriptor_c28a5f14f1f4c79a, []int{14} } func (m *TailRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -847,6 +904,7 @@ func (m *TailRequest) XXX_DiscardUnknown() { var xxx_messageInfo_TailRequest proto.InternalMessageInfo +// Deprecated: Do not use. func (m *TailRequest) GetQuery() string { if m != nil { return m.Query @@ -883,7 +941,7 @@ type TailResponse struct { func (m *TailResponse) Reset() { *m = TailResponse{} } func (*TailResponse) ProtoMessage() {} func (*TailResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{14} + return fileDescriptor_c28a5f14f1f4c79a, []int{15} } func (m *TailResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -929,7 +987,7 @@ type SeriesRequest struct { func (m *SeriesRequest) Reset() { *m = SeriesRequest{} } func (*SeriesRequest) ProtoMessage() {} func (*SeriesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{15} + return fileDescriptor_c28a5f14f1f4c79a, []int{16} } func (m *SeriesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -993,7 +1051,7 @@ type SeriesResponse struct { func (m *SeriesResponse) Reset() { *m = SeriesResponse{} } func (*SeriesResponse) ProtoMessage() {} func (*SeriesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{16} + return fileDescriptor_c28a5f14f1f4c79a, []int{17} } func (m *SeriesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1036,7 +1094,7 @@ type SeriesIdentifier struct { func (m *SeriesIdentifier) Reset() { *m = SeriesIdentifier{} } func (*SeriesIdentifier) ProtoMessage() {} func (*SeriesIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{17} + return fileDescriptor_c28a5f14f1f4c79a, []int{18} } func (m *SeriesIdentifier) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1081,7 +1139,7 @@ type DroppedStream struct { func (m *DroppedStream) Reset() { *m = DroppedStream{} } func (*DroppedStream) ProtoMessage() {} func (*DroppedStream) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{18} + return fileDescriptor_c28a5f14f1f4c79a, []int{19} } func (m *DroppedStream) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1131,73 +1189,6 @@ func (m *DroppedStream) GetLabels() string { return "" } -type TimeSeriesChunk struct { - FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` - UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - Labels []*LabelPair `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty"` - Chunks []*Chunk `protobuf:"bytes,4,rep,name=chunks,proto3" json:"chunks,omitempty"` -} - -func (m *TimeSeriesChunk) Reset() { *m = TimeSeriesChunk{} } -func (*TimeSeriesChunk) ProtoMessage() {} -func 
(*TimeSeriesChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{19} -} -func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TimeSeriesChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TimeSeriesChunk.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TimeSeriesChunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeriesChunk.Merge(m, src) -} -func (m *TimeSeriesChunk) XXX_Size() int { - return m.Size() -} -func (m *TimeSeriesChunk) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeriesChunk.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeriesChunk proto.InternalMessageInfo - -func (m *TimeSeriesChunk) GetFromIngesterId() string { - if m != nil { - return m.FromIngesterId - } - return "" -} - -func (m *TimeSeriesChunk) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -func (m *TimeSeriesChunk) GetLabels() []*LabelPair { - if m != nil { - return m.Labels - } - return nil -} - -func (m *TimeSeriesChunk) GetChunks() []*Chunk { - if m != nil { - return m.Chunks - } - return nil -} - type LabelPair struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` @@ -1250,6 +1241,7 @@ func (m *LabelPair) GetValue() string { } // LegacyLabelPair exists for backwards compatibility reasons and is deprecated. Do not use. +// Use LabelPair instead. type LegacyLabelPair struct { Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` @@ -1344,48 +1336,13 @@ func (m *Chunk) GetData() []byte { return nil } -type TransferChunksResponse struct { -} - -func (m *TransferChunksResponse) Reset() { *m = TransferChunksResponse{} } -func (*TransferChunksResponse) ProtoMessage() {} -func (*TransferChunksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{23} -} -func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TransferChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TransferChunksResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TransferChunksResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TransferChunksResponse.Merge(m, src) -} -func (m *TransferChunksResponse) XXX_Size() int { - return m.Size() -} -func (m *TransferChunksResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TransferChunksResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_TransferChunksResponse proto.InternalMessageInfo - type TailersCountRequest struct { } func (m *TailersCountRequest) Reset() { *m = TailersCountRequest{} } func (*TailersCountRequest) ProtoMessage() {} func (*TailersCountRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{24} + return fileDescriptor_c28a5f14f1f4c79a, []int{23} } func (m *TailersCountRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1421,7 +1378,7 @@ type TailersCountResponse struct { func (m *TailersCountResponse) Reset() { *m = TailersCountResponse{} } func 
(*TailersCountResponse) ProtoMessage() {} func (*TailersCountResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{25} + return fileDescriptor_c28a5f14f1f4c79a, []int{24} } func (m *TailersCountResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1466,7 +1423,7 @@ type GetChunkIDsRequest struct { func (m *GetChunkIDsRequest) Reset() { *m = GetChunkIDsRequest{} } func (*GetChunkIDsRequest) ProtoMessage() {} func (*GetChunkIDsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{26} + return fileDescriptor_c28a5f14f1f4c79a, []int{25} } func (m *GetChunkIDsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1523,7 +1480,7 @@ type GetChunkIDsResponse struct { func (m *GetChunkIDsResponse) Reset() { *m = GetChunkIDsResponse{} } func (*GetChunkIDsResponse) ProtoMessage() {} func (*GetChunkIDsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{27} + return fileDescriptor_c28a5f14f1f4c79a, []int{26} } func (m *GetChunkIDsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1576,7 +1533,7 @@ type ChunkRef struct { func (m *ChunkRef) Reset() { *m = ChunkRef{} } func (*ChunkRef) ProtoMessage() {} func (*ChunkRef) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{28} + return fileDescriptor_c28a5f14f1f4c79a, []int{27} } func (m *ChunkRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1637,7 +1594,7 @@ type LabelValuesForMetricNameRequest struct { func (m *LabelValuesForMetricNameRequest) Reset() { *m = LabelValuesForMetricNameRequest{} } func (*LabelValuesForMetricNameRequest) ProtoMessage() {} func (*LabelValuesForMetricNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{29} + return fileDescriptor_c28a5f14f1f4c79a, []int{28} } func (m *LabelValuesForMetricNameRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1696,7 +1653,7 @@ type LabelNamesForMetricNameRequest struct { func (m *LabelNamesForMetricNameRequest) Reset() { *m = LabelNamesForMetricNameRequest{} } func (*LabelNamesForMetricNameRequest) ProtoMessage() {} func (*LabelNamesForMetricNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{30} + return fileDescriptor_c28a5f14f1f4c79a, []int{29} } func (m *LabelNamesForMetricNameRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1732,10 +1689,62 @@ func (m *LabelNamesForMetricNameRequest) GetMetricName() string { return "" } +type LineFilterExpression struct { + Operator int64 `protobuf:"varint,1,opt,name=operator,proto3" json:"operator,omitempty"` + Match string `protobuf:"bytes,2,opt,name=match,proto3" json:"match,omitempty"` +} + +func (m *LineFilterExpression) Reset() { *m = LineFilterExpression{} } +func (*LineFilterExpression) ProtoMessage() {} +func (*LineFilterExpression) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{30} +} +func (m *LineFilterExpression) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LineFilterExpression) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LineFilterExpression.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LineFilterExpression) XXX_Merge(src proto.Message) { + xxx_messageInfo_LineFilterExpression.Merge(m, src) +} +func (m 
*LineFilterExpression) XXX_Size() int { + return m.Size() +} +func (m *LineFilterExpression) XXX_DiscardUnknown() { + xxx_messageInfo_LineFilterExpression.DiscardUnknown(m) +} + +var xxx_messageInfo_LineFilterExpression proto.InternalMessageInfo + +func (m *LineFilterExpression) GetOperator() int64 { + if m != nil { + return m.Operator + } + return 0 +} + +func (m *LineFilterExpression) GetMatch() string { + if m != nil { + return m.Match + } + return "" +} + type GetChunkRefRequest struct { From github_com_prometheus_common_model.Time `protobuf:"varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time" json:"from"` Through github_com_prometheus_common_model.Time `protobuf:"varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time" json:"through"` Matchers string `protobuf:"bytes,3,opt,name=matchers,proto3" json:"matchers,omitempty"` + Filters []*LineFilterExpression `protobuf:"bytes,4,rep,name=filters,proto3" json:"filters,omitempty"` } func (m *GetChunkRefRequest) Reset() { *m = GetChunkRefRequest{} } @@ -1777,6 +1786,13 @@ func (m *GetChunkRefRequest) GetMatchers() string { return "" } +func (m *GetChunkRefRequest) GetFilters() []*LineFilterExpression { + if m != nil { + return m.Filters + } + return nil +} + type GetChunkRefResponse struct { Refs []*ChunkRef `protobuf:"bytes,1,rep,name=refs,proto3" json:"refs,omitempty"` } @@ -2277,205 +2293,398 @@ func (m *IndexStatsResponse) GetEntries() uint64 { return 0 } -func init() { - proto.RegisterEnum("logproto.Direction", Direction_name, Direction_value) - proto.RegisterType((*StreamRatesRequest)(nil), "logproto.StreamRatesRequest") - proto.RegisterType((*StreamRatesResponse)(nil), "logproto.StreamRatesResponse") - proto.RegisterType((*StreamRate)(nil), "logproto.StreamRate") - proto.RegisterType((*QueryRequest)(nil), "logproto.QueryRequest") - proto.RegisterType((*SampleQueryRequest)(nil), "logproto.SampleQueryRequest") - proto.RegisterType((*Delete)(nil), "logproto.Delete") - proto.RegisterType((*QueryResponse)(nil), "logproto.QueryResponse") - proto.RegisterType((*SampleQueryResponse)(nil), "logproto.SampleQueryResponse") - proto.RegisterType((*LabelRequest)(nil), "logproto.LabelRequest") - proto.RegisterType((*LabelResponse)(nil), "logproto.LabelResponse") - proto.RegisterType((*Sample)(nil), "logproto.Sample") - proto.RegisterType((*LegacySample)(nil), "logproto.LegacySample") - proto.RegisterType((*Series)(nil), "logproto.Series") - proto.RegisterType((*TailRequest)(nil), "logproto.TailRequest") - proto.RegisterType((*TailResponse)(nil), "logproto.TailResponse") - proto.RegisterType((*SeriesRequest)(nil), "logproto.SeriesRequest") - proto.RegisterType((*SeriesResponse)(nil), "logproto.SeriesResponse") - proto.RegisterType((*SeriesIdentifier)(nil), "logproto.SeriesIdentifier") - proto.RegisterMapType((map[string]string)(nil), "logproto.SeriesIdentifier.LabelsEntry") - proto.RegisterType((*DroppedStream)(nil), "logproto.DroppedStream") - proto.RegisterType((*TimeSeriesChunk)(nil), "logproto.TimeSeriesChunk") - proto.RegisterType((*LabelPair)(nil), "logproto.LabelPair") - proto.RegisterType((*LegacyLabelPair)(nil), "logproto.LegacyLabelPair") - proto.RegisterType((*Chunk)(nil), "logproto.Chunk") - proto.RegisterType((*TransferChunksResponse)(nil), "logproto.TransferChunksResponse") - proto.RegisterType((*TailersCountRequest)(nil), "logproto.TailersCountRequest") - proto.RegisterType((*TailersCountResponse)(nil), "logproto.TailersCountResponse") - 
proto.RegisterType((*GetChunkIDsRequest)(nil), "logproto.GetChunkIDsRequest") - proto.RegisterType((*GetChunkIDsResponse)(nil), "logproto.GetChunkIDsResponse") - proto.RegisterType((*ChunkRef)(nil), "logproto.ChunkRef") - proto.RegisterType((*LabelValuesForMetricNameRequest)(nil), "logproto.LabelValuesForMetricNameRequest") - proto.RegisterType((*LabelNamesForMetricNameRequest)(nil), "logproto.LabelNamesForMetricNameRequest") - proto.RegisterType((*GetChunkRefRequest)(nil), "logproto.GetChunkRefRequest") - proto.RegisterType((*GetChunkRefResponse)(nil), "logproto.GetChunkRefResponse") - proto.RegisterType((*GetSeriesRequest)(nil), "logproto.GetSeriesRequest") - proto.RegisterType((*GetSeriesResponse)(nil), "logproto.GetSeriesResponse") - proto.RegisterType((*IndexSeries)(nil), "logproto.IndexSeries") - proto.RegisterType((*QueryIndexResponse)(nil), "logproto.QueryIndexResponse") - proto.RegisterType((*Row)(nil), "logproto.Row") - proto.RegisterType((*QueryIndexRequest)(nil), "logproto.QueryIndexRequest") - proto.RegisterType((*IndexQuery)(nil), "logproto.IndexQuery") - proto.RegisterType((*IndexStatsRequest)(nil), "logproto.IndexStatsRequest") - proto.RegisterType((*IndexStatsResponse)(nil), "logproto.IndexStatsResponse") +type VolumeRequest struct { + From github_com_prometheus_common_model.Time `protobuf:"varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time" json:"from"` + Through github_com_prometheus_common_model.Time `protobuf:"varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time" json:"through"` + Matchers string `protobuf:"bytes,3,opt,name=matchers,proto3" json:"matchers,omitempty"` + Limit int32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` + Step int64 `protobuf:"varint,5,opt,name=step,proto3" json:"step,omitempty"` + TargetLabels []string `protobuf:"bytes,6,rep,name=targetLabels,proto3" json:"targetLabels,omitempty"` + AggregateBy string `protobuf:"bytes,7,opt,name=aggregateBy,proto3" json:"aggregateBy,omitempty"` } -func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) } - -var fileDescriptor_c28a5f14f1f4c79a = []byte{ - // 2098 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0x4b, 0x6f, 0x1b, 0xc7, - 0x99, 0x43, 0x2e, 0x29, 0xf2, 0x23, 0x25, 0xd1, 0x23, 0xc6, 0x66, 0x69, 0x9b, 0x94, 0x17, 0xa9, - 0x2d, 0xd8, 0x0e, 0x59, 0x2b, 0x6d, 0xea, 0xd8, 0x4d, 0x0b, 0x53, 0x8a, 0x1d, 0xf9, 0x9d, 0x91, - 0xeb, 0x02, 0x41, 0x03, 0x63, 0x45, 0x0e, 0x1f, 0x10, 0x97, 0x4b, 0xef, 0x0e, 0xeb, 0x08, 0xe8, - 0xa1, 0xa7, 0xde, 0x02, 0xe4, 0x56, 0xf4, 0xd6, 0x43, 0x81, 0x16, 0x05, 0x7a, 0x29, 0xd0, 0x6b, - 0xdb, 0x43, 0x81, 0xba, 0x37, 0xf7, 0x16, 0xf4, 0xc0, 0xd6, 0xf2, 0xa5, 0xd0, 0x29, 0xbf, 0xa0, - 0x28, 0xe6, 0xb5, 0x3b, 0x5c, 0x51, 0x49, 0xe8, 0x1a, 0x08, 0x72, 0x11, 0x77, 0xbe, 0xf9, 0xe6, - 0x9b, 0xef, 0xfd, 0x18, 0xc1, 0xc9, 0xd1, 0x6e, 0xb7, 0x31, 0xf0, 0xba, 0x23, 0xdf, 0x63, 0x5e, - 0xf8, 0x51, 0x17, 0x7f, 0x71, 0x56, 0xaf, 0x2b, 0xa5, 0xae, 0xd7, 0xf5, 0x24, 0x0e, 0xff, 0x92, - 0xfb, 0x95, 0x5a, 0xd7, 0xf3, 0xba, 0x03, 0xda, 0x10, 0xab, 0x9d, 0x71, 0xa7, 0xc1, 0xfa, 0x2e, - 0x0d, 0x98, 0xe3, 0x8e, 0x14, 0xc2, 0xaa, 0xa2, 0xfe, 0x78, 0xe0, 0x7a, 0x6d, 0x3a, 0x68, 0x04, - 0xcc, 0x61, 0x81, 0xfc, 0xab, 0x30, 0x56, 0x38, 0xc6, 0x68, 0x1c, 0xf4, 0xc4, 0x1f, 0x09, 0xb4, - 0x4b, 0x80, 0xb7, 0x99, 0x4f, 0x1d, 0x97, 0x38, 0x8c, 0x06, 0x84, 0x3e, 0x1e, 0xd3, 0x80, 0xd9, - 0x77, 0x60, 0x65, 0x0a, 0x1a, 0x8c, 
0xbc, 0x61, 0x40, 0xf1, 0x5b, 0x90, 0x0f, 0x22, 0x70, 0x19, - 0xad, 0xa6, 0xd6, 0xf2, 0xeb, 0xa5, 0x7a, 0x28, 0x4a, 0x74, 0x86, 0x98, 0x88, 0xf6, 0xcf, 0x11, - 0x40, 0xb4, 0x87, 0xab, 0x00, 0x72, 0xf7, 0x3d, 0x27, 0xe8, 0x95, 0xd1, 0x2a, 0x5a, 0xb3, 0x88, - 0x01, 0xc1, 0x17, 0xe1, 0x58, 0xb4, 0xba, 0xeb, 0x6d, 0xf7, 0x1c, 0xbf, 0x5d, 0x4e, 0x0a, 0xb4, - 0xc3, 0x1b, 0x18, 0x83, 0xe5, 0x3b, 0x8c, 0x96, 0x53, 0xab, 0x68, 0x2d, 0x45, 0xc4, 0x37, 0x3e, - 0x0e, 0x19, 0x46, 0x87, 0xce, 0x90, 0x95, 0xad, 0x55, 0xb4, 0x96, 0x23, 0x6a, 0x65, 0xff, 0x2d, - 0x09, 0x85, 0xf7, 0xc7, 0xd4, 0xdf, 0x53, 0x82, 0xe2, 0x0a, 0x64, 0x03, 0x3a, 0xa0, 0x2d, 0xe6, - 0xf9, 0x82, 0x91, 0x1c, 0x09, 0xd7, 0xb8, 0x04, 0xe9, 0x41, 0xdf, 0xed, 0x33, 0x71, 0xf5, 0x22, - 0x91, 0x0b, 0x7c, 0x05, 0xd2, 0x01, 0x73, 0x7c, 0x26, 0xee, 0xcb, 0xaf, 0x57, 0xea, 0xd2, 0x30, - 0x75, 0x6d, 0x98, 0xfa, 0x03, 0x6d, 0x98, 0x66, 0xf6, 0xe9, 0xa4, 0x96, 0xf8, 0xe4, 0x5f, 0x35, - 0x44, 0xe4, 0x11, 0xfc, 0x16, 0xa4, 0xe8, 0xb0, 0x2d, 0x78, 0xfa, 0xb2, 0x27, 0xf9, 0x01, 0x7c, - 0x09, 0x72, 0xed, 0xbe, 0x4f, 0x5b, 0xac, 0xef, 0x0d, 0xcb, 0xe9, 0x55, 0xb4, 0xb6, 0xb4, 0xbe, - 0x12, 0x69, 0x7d, 0x53, 0x6f, 0x91, 0x08, 0x0b, 0x5f, 0x84, 0x4c, 0xc0, 0xd5, 0x13, 0x94, 0x17, - 0x56, 0x53, 0x6b, 0xb9, 0x66, 0xe9, 0x60, 0x52, 0x2b, 0x4a, 0xc8, 0x45, 0xcf, 0xed, 0x33, 0xea, - 0x8e, 0xd8, 0x1e, 0x51, 0x38, 0xf8, 0x3c, 0x2c, 0xb4, 0xe9, 0x80, 0x72, 0xa3, 0x66, 0x85, 0x51, - 0x8b, 0x06, 0x79, 0xb1, 0x41, 0x34, 0xc2, 0x4d, 0x2b, 0x9b, 0x29, 0x2e, 0xd8, 0xff, 0x45, 0x80, - 0xb7, 0x1d, 0x77, 0x34, 0xa0, 0x5f, 0x5a, 0x9f, 0xa1, 0xe6, 0x92, 0x2f, 0xad, 0xb9, 0xd4, 0xbc, - 0x9a, 0x8b, 0xd4, 0x60, 0xcd, 0xa7, 0x86, 0xf4, 0x17, 0xa8, 0xc1, 0xbe, 0x0d, 0x19, 0x09, 0xfa, - 0x22, 0x1f, 0x8a, 0x64, 0x4e, 0x69, 0x69, 0x8a, 0x91, 0x34, 0x29, 0xc1, 0xa7, 0xfd, 0x2b, 0x04, - 0x8b, 0x4a, 0x91, 0x2a, 0xd6, 0x76, 0x60, 0x41, 0xfa, 0xba, 0x8e, 0xb3, 0x13, 0xf1, 0x38, 0xbb, - 0xd6, 0x76, 0x46, 0x8c, 0xfa, 0xcd, 0xc6, 0xd3, 0x49, 0x0d, 0xfd, 0x73, 0x52, 0x3b, 0xd7, 0xed, - 0xb3, 0xde, 0x78, 0xa7, 0xde, 0xf2, 0xdc, 0x46, 0xd7, 0x77, 0x3a, 0xce, 0xd0, 0x69, 0x0c, 0xbc, - 0xdd, 0x7e, 0x43, 0xc7, 0xbd, 0x8e, 0x4f, 0x4d, 0x18, 0x5f, 0x10, 0xdc, 0xb1, 0x40, 0x59, 0x64, - 0xb9, 0x2e, 0xd3, 0xc5, 0xd6, 0xb0, 0x4b, 0x03, 0x4e, 0xd9, 0xe2, 0xca, 0x24, 0x12, 0xc7, 0xfe, - 0x29, 0xac, 0x4c, 0x19, 0x5c, 0xf1, 0x79, 0x19, 0x32, 0x01, 0xf5, 0xfb, 0x61, 0x3a, 0x30, 0x54, - 0xb6, 0x2d, 0xe0, 0xcd, 0x25, 0xc5, 0x5f, 0x46, 0xae, 0x89, 0xc2, 0x9f, 0xef, 0xf6, 0xbf, 0x22, - 0x28, 0xdc, 0x76, 0x76, 0xe8, 0x40, 0x7b, 0x1a, 0x06, 0x6b, 0xe8, 0xb8, 0x54, 0x69, 0x5c, 0x7c, - 0xf3, 0xb0, 0xff, 0x89, 0x33, 0x18, 0x53, 0x49, 0x32, 0x4b, 0xd4, 0x6a, 0xde, 0x98, 0x45, 0x2f, - 0x1d, 0xb3, 0x28, 0xf2, 0xbc, 0x12, 0xa4, 0x1f, 0x73, 0x45, 0x89, 0x78, 0xcd, 0x11, 0xb9, 0xb0, - 0xcf, 0xc1, 0xa2, 0x92, 0x42, 0xa9, 0x2f, 0x62, 0x99, 0xab, 0x2f, 0xa7, 0x59, 0xb6, 0x5d, 0xc8, - 0x48, 0x6d, 0xe3, 0xd7, 0x21, 0x17, 0xe6, 0x7a, 0x21, 0x6d, 0xaa, 0x99, 0x39, 0x98, 0xd4, 0x92, - 0x2c, 0x20, 0xd1, 0x06, 0xae, 0x41, 0x5a, 0x9c, 0x14, 0x92, 0xa3, 0x66, 0xee, 0x60, 0x52, 0x93, - 0x00, 0x22, 0x7f, 0xf0, 0x29, 0xb0, 0x7a, 0x3c, 0xdd, 0x72, 0x15, 0x58, 0xcd, 0xec, 0xc1, 0xa4, - 0x26, 0xd6, 0x44, 0xfc, 0xb5, 0x6f, 0x40, 0xe1, 0x36, 0xed, 0x3a, 0xad, 0x3d, 0x75, 0x69, 0x49, - 0x93, 0xe3, 0x17, 0x22, 0x4d, 0xe3, 0x0c, 0x14, 0xc2, 0x1b, 0x1f, 0xb9, 0x81, 0x72, 0xea, 0x7c, - 0x08, 0xbb, 0x13, 0xd8, 0xbf, 0x44, 0xa0, 0xec, 0x8c, 0x6d, 0xc8, 0x0c, 0xb8, 0xac, 0x81, 0xb4, - 0x51, 0x13, 0x0e, 0x26, 0x35, 0x05, 0x21, 0xea, 0x17, 0x5f, 
0x85, 0x85, 0x40, 0xdc, 0xc8, 0x89, - 0xc5, 0xdd, 0x47, 0x6c, 0x34, 0x97, 0xb9, 0x1b, 0x1c, 0x4c, 0x6a, 0x1a, 0x91, 0xe8, 0x0f, 0x5c, - 0x9f, 0xaa, 0x23, 0x52, 0xb0, 0xa5, 0x83, 0x49, 0xcd, 0x80, 0x9a, 0x75, 0xc5, 0xfe, 0x05, 0x82, - 0xfc, 0x03, 0xa7, 0x1f, 0xba, 0x50, 0x68, 0x22, 0x64, 0x98, 0x88, 0x87, 0x73, 0x9b, 0x0e, 0x9c, - 0xbd, 0xeb, 0x9e, 0x2f, 0x68, 0x2e, 0x92, 0x70, 0x1d, 0x95, 0x04, 0x6b, 0x66, 0x49, 0x48, 0xcf, - 0x9d, 0xd8, 0x6e, 0x5a, 0xd9, 0x64, 0x31, 0x65, 0xff, 0x1e, 0x41, 0x41, 0x72, 0xa6, 0xdc, 0xe2, - 0xc7, 0x90, 0x91, 0x8c, 0x0b, 0xde, 0x3e, 0x27, 0xf8, 0x2f, 0xcc, 0x13, 0xf8, 0x8a, 0x26, 0xfe, - 0x01, 0x2c, 0xb5, 0x7d, 0x6f, 0x34, 0xa2, 0xed, 0x6d, 0x95, 0x62, 0x92, 0xf1, 0x14, 0xb3, 0x69, - 0xee, 0x93, 0x18, 0xba, 0xfd, 0x77, 0x04, 0x8b, 0x2a, 0x9a, 0x95, 0x2e, 0x43, 0x1d, 0xa0, 0x97, - 0x4e, 0xee, 0xc9, 0x79, 0x93, 0xfb, 0x71, 0xc8, 0x74, 0x7d, 0x6f, 0x3c, 0x0a, 0xca, 0x29, 0x19, - 0x3b, 0x72, 0x35, 0x5f, 0xd2, 0xb7, 0x6f, 0xc2, 0x92, 0x16, 0xe5, 0x88, 0x94, 0x56, 0x89, 0xa7, - 0xb4, 0xad, 0x36, 0x1d, 0xb2, 0x7e, 0xa7, 0x1f, 0x26, 0x29, 0x85, 0x6f, 0x7f, 0x8c, 0xa0, 0x18, - 0x47, 0xc1, 0xdf, 0x37, 0xe2, 0x80, 0x93, 0x3b, 0x7b, 0x34, 0xb9, 0xba, 0x48, 0x0e, 0xc1, 0xbb, - 0x43, 0xe6, 0xef, 0xe9, 0x18, 0xa9, 0xbc, 0x0d, 0x79, 0x03, 0xcc, 0x8b, 0xc7, 0x2e, 0xd5, 0x3e, - 0xcb, 0x3f, 0xa3, 0x60, 0x4d, 0x4a, 0x3f, 0x16, 0x8b, 0x2b, 0xc9, 0xcb, 0x88, 0x7b, 0xfc, 0xe2, - 0x94, 0x25, 0xf1, 0x65, 0xb0, 0x3a, 0xbe, 0xe7, 0xce, 0x65, 0x26, 0x71, 0x02, 0x7f, 0x1b, 0x92, - 0xcc, 0x9b, 0xcb, 0x48, 0x49, 0xe6, 0x71, 0x1b, 0x29, 0xe1, 0x53, 0xb2, 0x13, 0x93, 0x2b, 0xfb, - 0x77, 0x08, 0x96, 0xf9, 0x19, 0xa9, 0x81, 0x8d, 0xde, 0x78, 0xb8, 0x8b, 0xd7, 0xa0, 0xc8, 0x6f, - 0x7a, 0xd4, 0x57, 0x15, 0xe0, 0x51, 0xbf, 0xad, 0xc4, 0x5c, 0xe2, 0x70, 0x5d, 0x18, 0xb6, 0xda, - 0xf8, 0x04, 0x2c, 0x8c, 0x03, 0x89, 0x20, 0x65, 0xce, 0xf0, 0xe5, 0x56, 0x1b, 0x5f, 0x30, 0xae, - 0xe3, 0xba, 0x36, 0xda, 0x24, 0xa1, 0xc3, 0xfb, 0x4e, 0xdf, 0x0f, 0x93, 0xcf, 0x39, 0xc8, 0xb4, - 0xf8, 0xc5, 0xd2, 0x4f, 0x78, 0x05, 0x0a, 0x91, 0x05, 0x43, 0x44, 0x6d, 0xdb, 0xdf, 0x81, 0x5c, - 0x78, 0x7a, 0x66, 0xe1, 0x99, 0x69, 0x01, 0xfb, 0x2a, 0x2c, 0xcb, 0xa4, 0x3a, 0xfb, 0x70, 0x61, - 0xd6, 0xe1, 0x82, 0x3e, 0x7c, 0x12, 0xd2, 0x52, 0x2b, 0x18, 0xac, 0xb6, 0xc3, 0x1c, 0x7d, 0x84, - 0x7f, 0xdb, 0x65, 0x38, 0xfe, 0xc0, 0x77, 0x86, 0x41, 0x87, 0xfa, 0x02, 0x29, 0xf4, 0x5d, 0xfb, - 0x35, 0x58, 0xe1, 0x89, 0x84, 0xfa, 0xc1, 0x86, 0x37, 0x1e, 0x32, 0xdd, 0xd0, 0x5f, 0x84, 0xd2, - 0x34, 0x58, 0xb9, 0x7a, 0x09, 0xd2, 0x2d, 0x0e, 0x10, 0xd4, 0x17, 0x89, 0x5c, 0xd8, 0xbf, 0x46, - 0x80, 0x6f, 0x50, 0x26, 0x48, 0x6f, 0x6d, 0x06, 0x46, 0x73, 0xe7, 0x3a, 0xac, 0xd5, 0xa3, 0x7e, - 0xa0, 0x1b, 0x1d, 0xbd, 0xfe, 0x2a, 0x9a, 0x3b, 0xfb, 0x12, 0xac, 0x4c, 0x71, 0xa9, 0x64, 0xaa, - 0x40, 0xb6, 0xa5, 0x60, 0xaa, 0xa8, 0x86, 0x6b, 0xfb, 0x0f, 0x49, 0xc8, 0x4a, 0xdb, 0xd2, 0x0e, - 0xbe, 0x04, 0xf9, 0x0e, 0xf7, 0x35, 0x7f, 0xe4, 0xf7, 0x95, 0x0a, 0xac, 0xe6, 0xf2, 0xc1, 0xa4, - 0x66, 0x82, 0x89, 0xb9, 0xc0, 0x6f, 0xc4, 0x1c, 0xaf, 0x59, 0xda, 0x9f, 0xd4, 0x32, 0x3f, 0xe4, - 0xce, 0xb7, 0xc9, 0xcb, 0x9b, 0x70, 0xc3, 0xcd, 0xd0, 0x1d, 0x6f, 0xa9, 0x68, 0x13, 0x9d, 0x5e, - 0xf3, 0xbb, 0x9c, 0xfd, 0x58, 0xbe, 0x1e, 0xf9, 0x9e, 0x4b, 0x59, 0x8f, 0x8e, 0x83, 0x46, 0xcb, - 0x73, 0x5d, 0x6f, 0xd8, 0x10, 0xe3, 0x9b, 0x10, 0x9a, 0xd7, 0x68, 0x7e, 0x5c, 0x05, 0xe0, 0x03, - 0x58, 0x60, 0x3d, 0xdf, 0x1b, 0x77, 0x7b, 0xa2, 0xfc, 0xa4, 0x9a, 0x57, 0xe6, 0xa7, 0xa7, 0x29, - 0x10, 0xfd, 0x81, 0xcf, 0x70, 0x6d, 0xd1, 0xd6, 0x6e, 0x30, 0x76, 0x45, 0xfd, 0x5a, 
0x6c, 0xa6, - 0x0f, 0x26, 0x35, 0xf4, 0x06, 0x09, 0xc1, 0xf6, 0xc7, 0x49, 0xa8, 0x09, 0x17, 0x7e, 0x28, 0x7a, - 0x93, 0xeb, 0x9e, 0x7f, 0x87, 0x32, 0xbf, 0xdf, 0xba, 0xeb, 0xb8, 0x54, 0xfb, 0x46, 0x0d, 0xf2, - 0xae, 0x00, 0x3e, 0x32, 0x82, 0x03, 0xdc, 0x10, 0x0f, 0x9f, 0x06, 0x10, 0x61, 0x27, 0xf7, 0x65, - 0x9c, 0xe4, 0x04, 0x44, 0x6c, 0x6f, 0x4c, 0x69, 0xaa, 0x31, 0xa7, 0x64, 0x4a, 0x43, 0x5b, 0x71, - 0x0d, 0xcd, 0x4d, 0x27, 0x54, 0x8b, 0xe9, 0xeb, 0xe9, 0x69, 0x5f, 0xb7, 0xff, 0x81, 0xa0, 0x7a, - 0x5b, 0x73, 0xfe, 0x92, 0xea, 0xd0, 0xf2, 0x26, 0x5f, 0x91, 0xbc, 0xa9, 0xff, 0x4f, 0x5e, 0xfb, - 0x2f, 0x46, 0xc8, 0x13, 0xda, 0xd1, 0x72, 0x6c, 0x18, 0xe5, 0xe2, 0x55, 0xb0, 0x99, 0x7c, 0x85, - 0x66, 0x49, 0xc5, 0xcc, 0xf2, 0x4e, 0x94, 0x0e, 0x84, 0x04, 0x2a, 0x1d, 0x9c, 0x05, 0xcb, 0xa7, - 0x1d, 0x5d, 0x7c, 0x71, 0x3c, 0xc7, 0xd3, 0x0e, 0x11, 0xfb, 0xf6, 0x9f, 0x10, 0x14, 0x6f, 0x50, - 0x36, 0xdd, 0xd6, 0x7c, 0x9d, 0xe4, 0x7f, 0x0f, 0x8e, 0x19, 0xfc, 0x2b, 0xe9, 0xdf, 0x8c, 0xf5, - 0x32, 0xaf, 0x45, 0xf2, 0x6f, 0x0d, 0xdb, 0xf4, 0x23, 0x35, 0xa3, 0x4d, 0xb7, 0x31, 0xf7, 0x21, - 0x6f, 0x6c, 0xe2, 0x6b, 0xb1, 0x06, 0x66, 0x56, 0x51, 0x6d, 0x96, 0x94, 0x4c, 0x72, 0x4a, 0x53, - 0xed, 0x69, 0x58, 0xee, 0xb7, 0x01, 0x8b, 0xb1, 0x51, 0x90, 0x35, 0x33, 0xb5, 0x80, 0xde, 0x0a, - 0xfb, 0x99, 0x70, 0x8d, 0xcf, 0x80, 0xe5, 0x7b, 0x4f, 0x74, 0x67, 0xba, 0x18, 0x5d, 0x49, 0xbc, - 0x27, 0x44, 0x6c, 0xd9, 0x57, 0x21, 0x45, 0xbc, 0x27, 0xb8, 0x0a, 0xe0, 0x3b, 0xc3, 0x2e, 0x7d, - 0x18, 0x0e, 0x2c, 0x05, 0x62, 0x40, 0x8e, 0xa8, 0xaf, 0x1b, 0x70, 0xcc, 0xe4, 0x48, 0x9a, 0xbb, - 0x0e, 0x0b, 0x1c, 0xd8, 0x9f, 0xf5, 0xb8, 0x25, 0x10, 0xe5, 0xec, 0xab, 0x91, 0xb8, 0xcf, 0x40, - 0x04, 0xc7, 0xa7, 0x20, 0xc7, 0x9c, 0x9d, 0x01, 0xbd, 0x1b, 0xc5, 0x7c, 0x04, 0xe0, 0xbb, 0x7c, - 0xd6, 0x7a, 0x68, 0x34, 0x0a, 0x11, 0x00, 0x9f, 0x87, 0x62, 0xc4, 0xf3, 0x7d, 0x9f, 0x76, 0xfa, - 0x1f, 0x09, 0x0b, 0x17, 0xc8, 0x21, 0x38, 0x5e, 0x83, 0xe5, 0x08, 0xb6, 0x2d, 0xca, 0xae, 0x25, - 0x50, 0xe3, 0x60, 0xae, 0x1b, 0x21, 0xee, 0xbb, 0x8f, 0xc7, 0xce, 0x40, 0x24, 0xb2, 0x02, 0x31, - 0x20, 0xf6, 0x9f, 0x11, 0x1c, 0x93, 0xa6, 0xe6, 0x53, 0xf6, 0xd7, 0xd1, 0xeb, 0x7f, 0x83, 0x00, - 0x9b, 0x12, 0x28, 0xd7, 0xfa, 0xa6, 0xf9, 0x7c, 0xc2, 0xeb, 0x7a, 0x5e, 0x8c, 0x90, 0x12, 0x14, - 0xbd, 0x80, 0xd8, 0x61, 0x0b, 0x28, 0xde, 0x17, 0xe5, 0x8c, 0x2a, 0x21, 0xba, 0xfb, 0xe3, 0xa3, - 0xf5, 0xce, 0x1e, 0xa3, 0x81, 0x9a, 0x30, 0xc5, 0x68, 0x2d, 0x00, 0x44, 0xfe, 0xf0, 0xbb, 0xe8, - 0x90, 0x09, 0xaf, 0xb1, 0xa2, 0xbb, 0x14, 0x88, 0xe8, 0x8f, 0xf3, 0x67, 0x21, 0x17, 0x3e, 0xd5, - 0xe1, 0x3c, 0x2c, 0x5c, 0xbf, 0x47, 0x7e, 0x74, 0x8d, 0x6c, 0x16, 0x13, 0xb8, 0x00, 0xd9, 0xe6, - 0xb5, 0x8d, 0x5b, 0x62, 0x85, 0xd6, 0xff, 0x68, 0x69, 0x2f, 0xf4, 0xf1, 0xf7, 0x20, 0x2d, 0x5d, - 0xeb, 0x78, 0xe4, 0x88, 0xe6, 0x83, 0x5b, 0xe5, 0xc4, 0x21, 0xb8, 0x6a, 0x04, 0x13, 0xdf, 0x42, - 0xf8, 0x2e, 0xe4, 0x05, 0x50, 0x0d, 0xf5, 0xa7, 0xe2, 0xb3, 0xf5, 0x14, 0xa5, 0xd3, 0x47, 0xec, - 0x1a, 0xf4, 0xae, 0x40, 0x5a, 0x44, 0xb7, 0xc9, 0x8d, 0xf9, 0x28, 0x63, 0x72, 0x33, 0xf5, 0xcc, - 0x61, 0x27, 0xf0, 0xdb, 0x60, 0xf1, 0x0e, 0x14, 0x1b, 0x09, 0xc8, 0x98, 0xc5, 0x2b, 0xc7, 0xe3, - 0x60, 0xe3, 0xda, 0x77, 0xc2, 0x27, 0x85, 0x13, 0xf1, 0xd1, 0x49, 0x1f, 0x2f, 0x1f, 0xde, 0x08, - 0x6f, 0xbe, 0x27, 0x67, 0x6b, 0xdd, 0xfb, 0xe2, 0xd3, 0xd3, 0x57, 0xc5, 0x5a, 0xe5, 0x4a, 0xf5, - 0xa8, 0xed, 0x90, 0xe0, 0x6d, 0xc8, 0x1b, 0x7d, 0xa7, 0xa9, 0xd6, 0xc3, 0x4d, 0xb3, 0xa9, 0xd6, - 0x19, 0xcd, 0xaa, 0x9d, 0xc0, 0x37, 0x20, 0xcb, 0xd3, 0x36, 0xf7, 0x5e, 0x7c, 0x32, 0x9e, 0x9d, - 0x8d, 0xa8, 
0xac, 0x9c, 0x9a, 0xbd, 0xa9, 0x09, 0xad, 0x7f, 0x08, 0x59, 0x3d, 0x22, 0xe1, 0xf7, - 0x61, 0x69, 0x7a, 0x40, 0xc0, 0xdf, 0x30, 0xc4, 0x9a, 0x9e, 0xbb, 0x2a, 0xab, 0xc6, 0xd6, 0xec, - 0xa9, 0x22, 0xb1, 0x86, 0xd6, 0x3f, 0xd4, 0x6f, 0xf8, 0x9b, 0x0e, 0x73, 0xf0, 0x3d, 0x58, 0x12, - 0x5c, 0x87, 0x8f, 0xfc, 0x53, 0xde, 0x75, 0xe8, 0x3f, 0x0a, 0x53, 0xde, 0x75, 0xf8, 0x3f, 0x0b, - 0x76, 0xa2, 0xf9, 0xc1, 0xb3, 0xe7, 0xd5, 0xc4, 0xa7, 0xcf, 0xab, 0x89, 0xcf, 0x9e, 0x57, 0xd1, - 0xcf, 0xf6, 0xab, 0xe8, 0xb7, 0xfb, 0x55, 0xf4, 0x74, 0xbf, 0x8a, 0x9e, 0xed, 0x57, 0xd1, 0xbf, - 0xf7, 0xab, 0xe8, 0x3f, 0xfb, 0xd5, 0xc4, 0x67, 0xfb, 0x55, 0xf4, 0xc9, 0x8b, 0x6a, 0xe2, 0xd9, - 0x8b, 0x6a, 0xe2, 0xd3, 0x17, 0xd5, 0xc4, 0x07, 0xaf, 0x7f, 0xde, 0xd3, 0x87, 0xbe, 0x71, 0x27, - 0x23, 0x7e, 0xde, 0xfc, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf9, 0x75, 0x18, 0x31, 0x82, 0x19, - 0x00, 0x00, +func (m *VolumeRequest) Reset() { *m = VolumeRequest{} } +func (*VolumeRequest) ProtoMessage() {} +func (*VolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{42} } - -func (x Direction) String() string { - s, ok := Direction_name[int32(x)] - if ok { - return s +func (m *VolumeRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VolumeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return strconv.Itoa(int(x)) } -func (this *StreamRatesRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil +func (m *VolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeRequest.Merge(m, src) +} +func (m *VolumeRequest) XXX_Size() int { + return m.Size() +} +func (m *VolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeRequest proto.InternalMessageInfo + +func (m *VolumeRequest) GetMatchers() string { + if m != nil { + return m.Matchers } + return "" +} - that1, ok := that.(*StreamRatesRequest) - if !ok { +func (m *VolumeRequest) GetLimit() int32 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *VolumeRequest) GetStep() int64 { + if m != nil { + return m.Step + } + return 0 +} + +func (m *VolumeRequest) GetTargetLabels() []string { + if m != nil { + return m.TargetLabels + } + return nil +} + +func (m *VolumeRequest) GetAggregateBy() string { + if m != nil { + return m.AggregateBy + } + return "" +} + +type VolumeResponse struct { + Volumes []Volume `protobuf:"bytes,1,rep,name=volumes,proto3" json:"volumes"` + Limit int32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (m *VolumeResponse) Reset() { *m = VolumeResponse{} } +func (*VolumeResponse) ProtoMessage() {} +func (*VolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{43} +} +func (m *VolumeResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VolumeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeResponse.Merge(m, src) +} +func (m *VolumeResponse) 
XXX_Size() int { + return m.Size() +} +func (m *VolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeResponse proto.InternalMessageInfo + +func (m *VolumeResponse) GetVolumes() []Volume { + if m != nil { + return m.Volumes + } + return nil +} + +func (m *VolumeResponse) GetLimit() int32 { + if m != nil { + return m.Limit + } + return 0 +} + +type Volume struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"` + Volume uint64 `protobuf:"varint,3,opt,name=volume,proto3" json:"volume"` +} + +func (m *Volume) Reset() { *m = Volume{} } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{44} +} +func (m *Volume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Volume.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(m, src) +} +func (m *Volume) XXX_Size() int { + return m.Size() +} +func (m *Volume) XXX_DiscardUnknown() { + xxx_messageInfo_Volume.DiscardUnknown(m) +} + +var xxx_messageInfo_Volume proto.InternalMessageInfo + +func (m *Volume) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Volume) GetVolume() uint64 { + if m != nil { + return m.Volume + } + return 0 +} + +func init() { + proto.RegisterEnum("logproto.Direction", Direction_name, Direction_value) + proto.RegisterType((*StreamRatesRequest)(nil), "logproto.StreamRatesRequest") + proto.RegisterType((*StreamRatesResponse)(nil), "logproto.StreamRatesResponse") + proto.RegisterType((*StreamRate)(nil), "logproto.StreamRate") + proto.RegisterType((*QueryRequest)(nil), "logproto.QueryRequest") + proto.RegisterType((*SampleQueryRequest)(nil), "logproto.SampleQueryRequest") + proto.RegisterType((*Plan)(nil), "logproto.Plan") + proto.RegisterType((*Delete)(nil), "logproto.Delete") + proto.RegisterType((*QueryResponse)(nil), "logproto.QueryResponse") + proto.RegisterType((*SampleQueryResponse)(nil), "logproto.SampleQueryResponse") + proto.RegisterType((*LabelRequest)(nil), "logproto.LabelRequest") + proto.RegisterType((*LabelResponse)(nil), "logproto.LabelResponse") + proto.RegisterType((*Sample)(nil), "logproto.Sample") + proto.RegisterType((*LegacySample)(nil), "logproto.LegacySample") + proto.RegisterType((*Series)(nil), "logproto.Series") + proto.RegisterType((*TailRequest)(nil), "logproto.TailRequest") + proto.RegisterType((*TailResponse)(nil), "logproto.TailResponse") + proto.RegisterType((*SeriesRequest)(nil), "logproto.SeriesRequest") + proto.RegisterType((*SeriesResponse)(nil), "logproto.SeriesResponse") + proto.RegisterType((*SeriesIdentifier)(nil), "logproto.SeriesIdentifier") + proto.RegisterMapType((map[string]string)(nil), "logproto.SeriesIdentifier.LabelsEntry") + proto.RegisterType((*DroppedStream)(nil), "logproto.DroppedStream") + proto.RegisterType((*LabelPair)(nil), "logproto.LabelPair") + proto.RegisterType((*LegacyLabelPair)(nil), "logproto.LegacyLabelPair") + proto.RegisterType((*Chunk)(nil), "logproto.Chunk") + proto.RegisterType((*TailersCountRequest)(nil), "logproto.TailersCountRequest") + proto.RegisterType((*TailersCountResponse)(nil), "logproto.TailersCountResponse") + 
proto.RegisterType((*GetChunkIDsRequest)(nil), "logproto.GetChunkIDsRequest") + proto.RegisterType((*GetChunkIDsResponse)(nil), "logproto.GetChunkIDsResponse") + proto.RegisterType((*ChunkRef)(nil), "logproto.ChunkRef") + proto.RegisterType((*LabelValuesForMetricNameRequest)(nil), "logproto.LabelValuesForMetricNameRequest") + proto.RegisterType((*LabelNamesForMetricNameRequest)(nil), "logproto.LabelNamesForMetricNameRequest") + proto.RegisterType((*LineFilterExpression)(nil), "logproto.LineFilterExpression") + proto.RegisterType((*GetChunkRefRequest)(nil), "logproto.GetChunkRefRequest") + proto.RegisterType((*GetChunkRefResponse)(nil), "logproto.GetChunkRefResponse") + proto.RegisterType((*GetSeriesRequest)(nil), "logproto.GetSeriesRequest") + proto.RegisterType((*GetSeriesResponse)(nil), "logproto.GetSeriesResponse") + proto.RegisterType((*IndexSeries)(nil), "logproto.IndexSeries") + proto.RegisterType((*QueryIndexResponse)(nil), "logproto.QueryIndexResponse") + proto.RegisterType((*Row)(nil), "logproto.Row") + proto.RegisterType((*QueryIndexRequest)(nil), "logproto.QueryIndexRequest") + proto.RegisterType((*IndexQuery)(nil), "logproto.IndexQuery") + proto.RegisterType((*IndexStatsRequest)(nil), "logproto.IndexStatsRequest") + proto.RegisterType((*IndexStatsResponse)(nil), "logproto.IndexStatsResponse") + proto.RegisterType((*VolumeRequest)(nil), "logproto.VolumeRequest") + proto.RegisterType((*VolumeResponse)(nil), "logproto.VolumeResponse") + proto.RegisterType((*Volume)(nil), "logproto.Volume") +} + +func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) } + +var fileDescriptor_c28a5f14f1f4c79a = []byte{ + // 2279 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0x4d, 0x6f, 0x1b, 0xc7, + 0x95, 0x4b, 0x2e, 0xbf, 0x1e, 0x29, 0x59, 0x1e, 0x31, 0x36, 0x41, 0xdb, 0xa4, 0x3c, 0x48, 0x1d, + 0xc1, 0x71, 0xc8, 0x58, 0x69, 0x5d, 0xc7, 0x6e, 0xda, 0x9a, 0x92, 0x3f, 0x64, 0xcb, 0x1f, 0x19, + 0xb9, 0x6e, 0x61, 0xb4, 0x30, 0x56, 0xe2, 0x88, 0x22, 0xcc, 0xe5, 0xd2, 0xbb, 0xc3, 0xd8, 0x02, + 0x7a, 0xe8, 0x1f, 0x08, 0x90, 0x5b, 0xd1, 0x4b, 0xd1, 0x43, 0x81, 0x14, 0x28, 0x7a, 0xe9, 0x0f, + 0x68, 0x2f, 0x3d, 0xb8, 0x37, 0xf7, 0x16, 0xe4, 0xc0, 0xd6, 0xf2, 0xa5, 0xd0, 0x29, 0xb7, 0x00, + 0x3d, 0x15, 0xf3, 0xb5, 0x3b, 0xbb, 0xa2, 0x8c, 0xd0, 0x75, 0x11, 0xf8, 0xc2, 0x9d, 0xf7, 0xe6, + 0xcd, 0x9b, 0xf7, 0x35, 0xef, 0xcd, 0x1b, 0xc2, 0xb1, 0xe1, 0xc3, 0x6e, 0xab, 0xef, 0x75, 0x87, + 0xbe, 0xc7, 0xbc, 0x70, 0xd0, 0x14, 0xbf, 0xa8, 0xa0, 0xe1, 0x5a, 0xa5, 0xeb, 0x75, 0x3d, 0x49, + 0xc3, 0x47, 0x72, 0xbe, 0xd6, 0xe8, 0x7a, 0x5e, 0xb7, 0x4f, 0x5b, 0x02, 0xda, 0x18, 0x6d, 0xb5, + 0x58, 0xcf, 0xa5, 0x01, 0x73, 0xdc, 0xa1, 0x22, 0x58, 0x50, 0xdc, 0x1f, 0xf5, 0x5d, 0xaf, 0x43, + 0xfb, 0xad, 0x80, 0x39, 0x2c, 0x90, 0xbf, 0x8a, 0x62, 0x9e, 0x53, 0x0c, 0x47, 0xc1, 0xb6, 0xf8, + 0x91, 0x48, 0x5c, 0x01, 0xb4, 0xce, 0x7c, 0xea, 0xb8, 0xc4, 0x61, 0x34, 0x20, 0xf4, 0xd1, 0x88, + 0x06, 0x0c, 0xdf, 0x84, 0xf9, 0x18, 0x36, 0x18, 0x7a, 0x83, 0x80, 0xa2, 0x73, 0x50, 0x0a, 0x22, + 0x74, 0xd5, 0x5a, 0xc8, 0x2c, 0x96, 0x96, 0x2a, 0xcd, 0x50, 0x95, 0x68, 0x0d, 0x31, 0x09, 0xf1, + 0x6f, 0x2d, 0x80, 0x68, 0x0e, 0xd5, 0x01, 0xe4, 0xec, 0x35, 0x27, 0xd8, 0xae, 0x5a, 0x0b, 0xd6, + 0xa2, 0x4d, 0x0c, 0x0c, 0x3a, 0x03, 0x87, 0x23, 0xe8, 0x96, 0xb7, 0xbe, 0xed, 0xf8, 0x9d, 0x6a, + 0x5a, 0x90, 0xed, 0x9f, 0x40, 0x08, 0x6c, 0xdf, 0x61, 0xb4, 0x9a, 0x59, 0xb0, 0x16, 0x33, 0x44, + 0x8c, 0xd1, 0x11, 0xc8, 0x31, 0x3a, 0x70, 0x06, 0xac, 0x6a, 
0x2f, 0x58, 0x8b, 0x45, 0xa2, 0x20, + 0x8e, 0xe7, 0xba, 0xd3, 0xa0, 0x9a, 0x5d, 0xb0, 0x16, 0x67, 0x88, 0x82, 0xf0, 0xe7, 0x19, 0x28, + 0x7f, 0x3c, 0xa2, 0xfe, 0x8e, 0x32, 0x00, 0xaa, 0x43, 0x21, 0xa0, 0x7d, 0xba, 0xc9, 0x3c, 0x5f, + 0x08, 0x58, 0x6c, 0xa7, 0xab, 0x16, 0x09, 0x71, 0xa8, 0x02, 0xd9, 0x7e, 0xcf, 0xed, 0x31, 0x21, + 0xd6, 0x0c, 0x91, 0x00, 0xba, 0x00, 0xd9, 0x80, 0x39, 0x3e, 0x13, 0xb2, 0x94, 0x96, 0x6a, 0x4d, + 0xe9, 0xb4, 0xa6, 0x76, 0x5a, 0xf3, 0xae, 0x76, 0x5a, 0xbb, 0xf0, 0x74, 0xdc, 0x48, 0x7d, 0xf6, + 0xcf, 0x86, 0x45, 0xe4, 0x12, 0x74, 0x0e, 0x32, 0x74, 0xd0, 0x11, 0xf2, 0x7e, 0xd3, 0x95, 0x7c, + 0x01, 0x3a, 0x0b, 0xc5, 0x4e, 0xcf, 0xa7, 0x9b, 0xac, 0xe7, 0x0d, 0x84, 0x56, 0xb3, 0x4b, 0xf3, + 0x91, 0x47, 0x56, 0xf4, 0x14, 0x89, 0xa8, 0xd0, 0x19, 0xc8, 0x05, 0xdc, 0x74, 0x41, 0x35, 0xbf, + 0x90, 0x59, 0x2c, 0xb6, 0x2b, 0x7b, 0xe3, 0xc6, 0x9c, 0xc4, 0x9c, 0xf1, 0xdc, 0x1e, 0xa3, 0xee, + 0x90, 0xed, 0x10, 0x45, 0x83, 0x4e, 0x43, 0xbe, 0x43, 0xfb, 0x94, 0x3b, 0xbc, 0x20, 0x1c, 0x3e, + 0x67, 0xb0, 0x17, 0x13, 0x44, 0x13, 0xa0, 0xfb, 0x60, 0x0f, 0xfb, 0xce, 0xa0, 0x5a, 0x14, 0x5a, + 0xcc, 0x46, 0x84, 0x77, 0xfa, 0xce, 0xa0, 0x7d, 0xee, 0xcb, 0x71, 0x63, 0xa9, 0xdb, 0x63, 0xdb, + 0xa3, 0x8d, 0xe6, 0xa6, 0xe7, 0xb6, 0xba, 0xbe, 0xb3, 0xe5, 0x0c, 0x9c, 0x56, 0xdf, 0x7b, 0xd8, + 0x6b, 0xf1, 0xe0, 0x7c, 0x34, 0xa2, 0x7e, 0x8f, 0xfa, 0x2d, 0xce, 0xa3, 0x29, 0xfc, 0xc1, 0xd7, + 0x11, 0xc1, 0xf3, 0xba, 0x5d, 0xc8, 0xcd, 0xe5, 0xf1, 0x38, 0x0d, 0x68, 0xdd, 0x71, 0x87, 0x7d, + 0x3a, 0x95, 0xbf, 0x42, 0xcf, 0xa4, 0x5f, 0xd9, 0x33, 0x99, 0x69, 0x3d, 0x13, 0x99, 0xd9, 0x9e, + 0xce, 0xcc, 0xd9, 0x6f, 0x6a, 0xe6, 0xdc, 0xeb, 0x37, 0x33, 0xae, 0x82, 0xcd, 0x21, 0x34, 0x07, + 0x19, 0xdf, 0x79, 0x2c, 0x8c, 0x59, 0x26, 0x7c, 0x88, 0xd7, 0x20, 0x27, 0x05, 0x41, 0xb5, 0xa4, + 0xb5, 0xe3, 0x27, 0x23, 0xb2, 0x74, 0x46, 0xdb, 0x70, 0x2e, 0xb2, 0x61, 0x46, 0x58, 0x07, 0xff, + 0xce, 0x82, 0x19, 0xe5, 0x42, 0x95, 0x5d, 0x36, 0x20, 0x2f, 0x4f, 0xb7, 0xce, 0x2c, 0x47, 0x93, + 0x99, 0xe5, 0x52, 0xc7, 0x19, 0x32, 0xea, 0xb7, 0x5b, 0x4f, 0xc7, 0x0d, 0xeb, 0xcb, 0x71, 0xe3, + 0x9d, 0x97, 0x69, 0x29, 0x92, 0x9c, 0xca, 0x3a, 0x9a, 0x31, 0x7a, 0x57, 0x48, 0xc7, 0x02, 0x15, + 0x07, 0x87, 0x9a, 0x32, 0x41, 0xae, 0x0e, 0xba, 0x34, 0xe0, 0x9c, 0x6d, 0xee, 0x42, 0x22, 0x69, + 0xf0, 0x2f, 0x61, 0x3e, 0x16, 0x6a, 0x4a, 0xce, 0xf3, 0x90, 0x0b, 0xb8, 0x01, 0xb5, 0x98, 0x86, + 0xa3, 0xd6, 0x05, 0xbe, 0x3d, 0xab, 0xe4, 0xcb, 0x49, 0x98, 0x28, 0xfa, 0xe9, 0x76, 0xff, 0x9b, + 0x05, 0xe5, 0x35, 0x67, 0x83, 0xf6, 0x75, 0x8c, 0x23, 0xb0, 0x07, 0x8e, 0x4b, 0x95, 0xc5, 0xc5, + 0x98, 0x27, 0xb4, 0x4f, 0x9c, 0xfe, 0x88, 0x4a, 0x96, 0x05, 0xa2, 0xa0, 0x69, 0x33, 0x91, 0xf5, + 0xca, 0x99, 0xc8, 0x8a, 0xe2, 0xbd, 0x02, 0x59, 0x1e, 0x59, 0x3b, 0x22, 0x0b, 0x15, 0x89, 0x04, + 0xf0, 0x3b, 0x30, 0xa3, 0xb4, 0x50, 0xe6, 0x8b, 0x44, 0xe6, 0xe6, 0x2b, 0x6a, 0x91, 0xb1, 0x0b, + 0x39, 0x69, 0x6d, 0xf4, 0x36, 0x14, 0xc3, 0xea, 0x26, 0xb4, 0xcd, 0xb4, 0x73, 0x7b, 0xe3, 0x46, + 0x9a, 0x05, 0x24, 0x9a, 0x40, 0x0d, 0xc8, 0x8a, 0x95, 0x42, 0x73, 0xab, 0x5d, 0xdc, 0x1b, 0x37, + 0x24, 0x82, 0xc8, 0x0f, 0x3a, 0x0e, 0xf6, 0x36, 0x2f, 0x30, 0xdc, 0x04, 0x76, 0xbb, 0xb0, 0x37, + 0x6e, 0x08, 0x98, 0x88, 0x5f, 0x7c, 0x15, 0xca, 0x6b, 0xb4, 0xeb, 0x6c, 0xee, 0xa8, 0x4d, 0x2b, + 0x9a, 0x1d, 0xdf, 0xd0, 0xd2, 0x3c, 0x4e, 0x42, 0x39, 0xdc, 0xf1, 0x81, 0x1b, 0xa8, 0xa0, 0x2e, + 0x85, 0xb8, 0x9b, 0x01, 0xfe, 0x8d, 0x05, 0xca, 0xcf, 0x08, 0x43, 0xae, 0xcf, 0x75, 0x0d, 0x54, + 0x0e, 0x82, 0xbd, 0x71, 0x43, 0x61, 0x88, 0xfa, 0xa2, 0x8b, 0x90, 0x0f, 0xc4, 0x8e, 
0x9c, 0x59, + 0x32, 0x7c, 0xc4, 0x44, 0xfb, 0x10, 0x0f, 0x83, 0xbd, 0x71, 0x43, 0x13, 0x12, 0x3d, 0x40, 0xcd, + 0x58, 0xe5, 0x94, 0x8a, 0xcd, 0xee, 0x8d, 0x1b, 0x06, 0xd6, 0xac, 0xa4, 0xf8, 0x6b, 0x0b, 0x4a, + 0x77, 0x9d, 0x5e, 0x18, 0x42, 0x55, 0xed, 0xa2, 0x28, 0x47, 0x4a, 0x04, 0x3f, 0xd2, 0x1d, 0xda, + 0x77, 0x76, 0xae, 0x78, 0xbe, 0xe0, 0x3b, 0x43, 0x42, 0x38, 0x2a, 0x76, 0xf6, 0xc4, 0x62, 0x97, + 0x9d, 0x3e, 0xa5, 0xfe, 0x1f, 0x13, 0xd8, 0x75, 0xbb, 0x90, 0x9e, 0xcb, 0xe0, 0x3f, 0x59, 0x50, + 0x96, 0x9a, 0xab, 0xb0, 0xfb, 0x39, 0xe4, 0xa4, 0x61, 0x84, 0xee, 0x2f, 0x49, 0x2e, 0xef, 0x4e, + 0x93, 0x58, 0x14, 0x4f, 0xf4, 0x23, 0x98, 0xed, 0xf8, 0xde, 0x70, 0x48, 0x3b, 0xeb, 0x2a, 0x85, + 0xa5, 0x93, 0x29, 0x6c, 0xc5, 0x9c, 0x27, 0x09, 0x72, 0xfc, 0x77, 0x0b, 0x66, 0x54, 0xb6, 0x50, + 0xbe, 0x0a, 0xed, 0x6b, 0xbd, 0x72, 0xc9, 0x4a, 0x4f, 0x5b, 0xb2, 0x8e, 0x40, 0xae, 0xeb, 0x7b, + 0xa3, 0x61, 0x50, 0xcd, 0xc8, 0xb3, 0x29, 0xa1, 0xe9, 0x4a, 0x19, 0xbe, 0x0e, 0xb3, 0x5a, 0x95, + 0x03, 0x52, 0x66, 0x2d, 0x99, 0x32, 0x57, 0x3b, 0x74, 0xc0, 0x7a, 0x5b, 0xbd, 0x30, 0x09, 0x2a, + 0x7a, 0xfc, 0xa9, 0x05, 0x73, 0x49, 0x12, 0xf4, 0x43, 0xe3, 0x9c, 0x71, 0x76, 0xa7, 0x0e, 0x66, + 0xd7, 0x14, 0xc9, 0x27, 0xb8, 0x3c, 0x60, 0xfe, 0x8e, 0x3e, 0x83, 0xb5, 0x0f, 0xa1, 0x64, 0xa0, + 0x79, 0x71, 0x7a, 0x48, 0xd5, 0x99, 0x20, 0x7c, 0x18, 0x25, 0x83, 0xb4, 0x4c, 0x65, 0x02, 0xb8, + 0x90, 0x3e, 0x6f, 0xe1, 0x5f, 0x5b, 0x30, 0x13, 0xf3, 0x24, 0x3a, 0x0f, 0xf6, 0x96, 0xef, 0xb9, + 0x53, 0xb9, 0x49, 0xac, 0x40, 0xdf, 0x85, 0x34, 0xf3, 0xa6, 0x72, 0x52, 0x9a, 0x79, 0xdc, 0x47, + 0x4a, 0xf9, 0x8c, 0xbc, 0xdb, 0x4a, 0x08, 0x7f, 0x0f, 0x8a, 0x42, 0xa9, 0x3b, 0x4e, 0xcf, 0x9f, + 0x58, 0x2b, 0x26, 0x2a, 0x85, 0x2f, 0xc2, 0x21, 0x99, 0x07, 0x27, 0x2f, 0x2e, 0x4f, 0x5a, 0x5c, + 0xd6, 0x8b, 0x8f, 0x41, 0x76, 0x79, 0x7b, 0x34, 0x78, 0xc8, 0x97, 0x74, 0x1c, 0xe6, 0xe8, 0x25, + 0x7c, 0x8c, 0xdf, 0x82, 0x79, 0x7e, 0x02, 0xa9, 0x1f, 0x2c, 0x7b, 0xa3, 0x01, 0xd3, 0xbd, 0xc5, + 0x19, 0xa8, 0xc4, 0xd1, 0x2a, 0x46, 0x2a, 0x90, 0xdd, 0xe4, 0x08, 0xc1, 0x63, 0x86, 0x48, 0x00, + 0xff, 0xde, 0x02, 0x74, 0x95, 0x32, 0xb1, 0xcb, 0xea, 0x4a, 0x78, 0x38, 0x6a, 0x50, 0x70, 0x1d, + 0xb6, 0xb9, 0x4d, 0xfd, 0x40, 0xdf, 0x40, 0x34, 0xfc, 0x6d, 0xdc, 0xf5, 0xf0, 0x59, 0x98, 0x8f, + 0x49, 0xa9, 0x74, 0xaa, 0x41, 0x61, 0x53, 0xe1, 0x54, 0xb5, 0x0b, 0x61, 0xfc, 0xe7, 0x34, 0x14, + 0xc4, 0x02, 0x42, 0xb7, 0xd0, 0x59, 0x28, 0x6d, 0xf5, 0x06, 0x5d, 0xea, 0x0f, 0xfd, 0x9e, 0x32, + 0x81, 0xdd, 0x3e, 0xb4, 0x37, 0x6e, 0x98, 0x68, 0x62, 0x02, 0xe8, 0x3d, 0xc8, 0x8f, 0x02, 0xea, + 0x3f, 0xe8, 0xc9, 0x73, 0x5e, 0x6c, 0x57, 0x76, 0xc7, 0x8d, 0xdc, 0x4f, 0x02, 0xea, 0xaf, 0xae, + 0xf0, 0xba, 0x33, 0x12, 0x23, 0x22, 0xbf, 0x1d, 0x74, 0x43, 0x85, 0xa9, 0xb8, 0x82, 0xb5, 0xbf, + 0xcf, 0xc5, 0x4f, 0x24, 0xba, 0xa1, 0xef, 0xb9, 0x94, 0x6d, 0xd3, 0x51, 0xd0, 0xda, 0xf4, 0x5c, + 0xd7, 0x1b, 0xb4, 0x44, 0x27, 0x29, 0x94, 0xe6, 0xc5, 0x93, 0x2f, 0x57, 0x91, 0x7b, 0x17, 0xf2, + 0x6c, 0xdb, 0xf7, 0x46, 0xdd, 0x6d, 0x51, 0x13, 0x32, 0xed, 0x0b, 0xd3, 0xf3, 0xd3, 0x1c, 0x88, + 0x1e, 0xa0, 0x93, 0xdc, 0x5a, 0x74, 0xf3, 0x61, 0x30, 0x72, 0x65, 0x7f, 0xd6, 0xce, 0xee, 0x8d, + 0x1b, 0xd6, 0x7b, 0x24, 0x44, 0xe3, 0x4f, 0xd3, 0xd0, 0x10, 0x81, 0x7a, 0x4f, 0x5c, 0x1a, 0xae, + 0x78, 0xfe, 0x4d, 0xca, 0xfc, 0xde, 0xe6, 0x2d, 0xc7, 0xa5, 0x3a, 0x36, 0x1a, 0x50, 0x72, 0x05, + 0xf2, 0x81, 0x71, 0x04, 0xc0, 0x0d, 0xe9, 0xd0, 0x09, 0x00, 0x71, 0x66, 0xe4, 0xbc, 0x3c, 0x0d, + 0x45, 0x81, 0x11, 0xd3, 0xcb, 0x31, 0x4b, 0xb5, 0xa6, 0xd4, 0x4c, 0x59, 0x68, 0x35, 0x69, 0xa1, + 0xa9, 0xf9, 
0x84, 0x66, 0x31, 0x63, 0x3d, 0x1b, 0x8f, 0x75, 0xfc, 0x0f, 0x0b, 0xea, 0x6b, 0x5a, + 0xf2, 0x57, 0x34, 0x87, 0xd6, 0x37, 0xfd, 0x9a, 0xf4, 0xcd, 0xfc, 0x6f, 0xfa, 0xe2, 0x6b, 0x50, + 0x59, 0xeb, 0x0d, 0xe8, 0x95, 0x5e, 0x9f, 0x51, 0xff, 0xf2, 0x93, 0xa1, 0x4f, 0x83, 0x80, 0xb7, + 0xad, 0x35, 0x28, 0x78, 0x43, 0xea, 0x3b, 0xba, 0xeb, 0xc8, 0x90, 0x10, 0xe6, 0xc9, 0x43, 0xd8, + 0x44, 0xe7, 0x36, 0x01, 0xe0, 0xff, 0x18, 0xc9, 0x83, 0xd0, 0x2d, 0x6d, 0x91, 0x65, 0x23, 0x63, + 0xbf, 0x0e, 0x85, 0xd3, 0xaf, 0xd1, 0xc1, 0x99, 0x44, 0x32, 0x3b, 0x0f, 0xf9, 0x2d, 0x61, 0x08, + 0x59, 0x7a, 0x4b, 0x4b, 0xf5, 0xa8, 0xd6, 0x4d, 0xb2, 0x12, 0xd1, 0xe4, 0xf8, 0xa3, 0x28, 0x25, + 0x09, 0xdd, 0x55, 0x4a, 0x3a, 0x05, 0xb6, 0x4f, 0xb7, 0x74, 0xe5, 0x44, 0x11, 0xb7, 0x90, 0x52, + 0xcc, 0xe3, 0xbf, 0x58, 0x30, 0x77, 0x95, 0xb2, 0xf8, 0x9d, 0xe4, 0x0d, 0xb2, 0x1c, 0xbe, 0x06, + 0x87, 0x0d, 0xf9, 0x95, 0xf6, 0x1f, 0x24, 0x2e, 0x22, 0x6f, 0x45, 0xfa, 0xaf, 0x0e, 0x3a, 0xf4, + 0x89, 0x6a, 0xe0, 0xe2, 0x77, 0x90, 0x3b, 0x50, 0x32, 0x26, 0xd1, 0xa5, 0xc4, 0xed, 0xc3, 0x78, + 0x6e, 0x09, 0x6b, 0x68, 0xbb, 0xa2, 0x74, 0x92, 0x2d, 0x9c, 0xba, 0x5b, 0x86, 0xb5, 0x7a, 0x1d, + 0x90, 0xb8, 0xb6, 0x0a, 0xb6, 0x66, 0xb5, 0x10, 0xd8, 0x1b, 0xe1, 0x65, 0x24, 0x84, 0xd1, 0x49, + 0xb0, 0x7d, 0xef, 0xb1, 0xbe, 0x56, 0xce, 0x44, 0x5b, 0x12, 0xef, 0x31, 0x11, 0x53, 0xf8, 0x22, + 0x64, 0x88, 0xf7, 0x18, 0xd5, 0x01, 0x7c, 0x67, 0xd0, 0xa5, 0xf7, 0xc2, 0x6e, 0xa6, 0x4c, 0x0c, + 0xcc, 0x01, 0x95, 0x7c, 0x19, 0x0e, 0x9b, 0x12, 0x49, 0x77, 0x37, 0x21, 0xff, 0xf1, 0xc8, 0x34, + 0x57, 0x25, 0x61, 0x2e, 0xd9, 0x18, 0x6b, 0x22, 0x1e, 0x33, 0x10, 0xe1, 0xd1, 0x71, 0x28, 0x32, + 0x67, 0xa3, 0x4f, 0x6f, 0x45, 0x79, 0x27, 0x42, 0xf0, 0x59, 0xde, 0x88, 0xdd, 0x33, 0xae, 0x24, + 0x11, 0x02, 0x9d, 0x86, 0xb9, 0x48, 0xe6, 0x3b, 0x3e, 0xdd, 0xea, 0x3d, 0x11, 0x1e, 0x2e, 0x93, + 0x7d, 0x78, 0xb4, 0x08, 0x87, 0x22, 0xdc, 0xba, 0x28, 0xfd, 0xb6, 0x20, 0x4d, 0xa2, 0xb9, 0x6d, + 0x84, 0xba, 0x97, 0x1f, 0x8d, 0x9c, 0xbe, 0x48, 0xa6, 0x65, 0x62, 0x60, 0xf0, 0x5f, 0x2d, 0x38, + 0x2c, 0x5d, 0xcd, 0x5b, 0xf0, 0x37, 0x31, 0xea, 0x3f, 0xb7, 0x00, 0x99, 0x1a, 0xa8, 0xd0, 0xfa, + 0x8e, 0xf9, 0xb6, 0xc2, 0xef, 0x16, 0x25, 0xd1, 0x5f, 0x4a, 0x54, 0xf4, 0x3c, 0x82, 0x21, 0x27, + 0xee, 0x27, 0xb2, 0xd1, 0xb5, 0x65, 0x03, 0x2b, 0x31, 0x44, 0x7d, 0x79, 0xdf, 0xbd, 0xb1, 0xc3, + 0x68, 0xa0, 0xda, 0x4f, 0xd1, 0x77, 0x0b, 0x04, 0x91, 0x1f, 0xbe, 0x17, 0x1d, 0x30, 0x11, 0x35, + 0x76, 0xb4, 0x97, 0x42, 0x11, 0x3d, 0xc0, 0x7f, 0x4c, 0xc3, 0xcc, 0x3d, 0xaf, 0x3f, 0x8a, 0x2a, + 0xd5, 0x9b, 0x94, 0x97, 0x63, 0x3d, 0x71, 0x56, 0xf7, 0xc4, 0x08, 0xec, 0x80, 0xd1, 0xa1, 0x88, + 0xac, 0x0c, 0x11, 0x63, 0x84, 0xa1, 0xcc, 0x1c, 0xbf, 0x4b, 0x99, 0x6c, 0x39, 0xaa, 0x39, 0x71, + 0x0f, 0x8c, 0xe1, 0xd0, 0x02, 0x94, 0x9c, 0x6e, 0xd7, 0xa7, 0x5d, 0x87, 0xd1, 0xf6, 0x4e, 0x35, + 0x2f, 0x36, 0x33, 0x51, 0xf8, 0x67, 0x30, 0xab, 0x8d, 0xa5, 0x5c, 0xfa, 0x3e, 0xe4, 0x3f, 0x11, + 0x98, 0x09, 0xef, 0x50, 0x92, 0x54, 0xa5, 0x31, 0x4d, 0x16, 0x7f, 0xb4, 0xd6, 0x32, 0xe3, 0xeb, + 0x90, 0x93, 0xe4, 0xe8, 0xb8, 0xd9, 0x34, 0xc8, 0x07, 0x13, 0x0e, 0xab, 0x0e, 0x00, 0x43, 0x4e, + 0x32, 0x52, 0x8e, 0x17, 0xb1, 0x21, 0x31, 0x44, 0x7d, 0x4f, 0x9f, 0x82, 0x62, 0xf8, 0xe2, 0x8c, + 0x4a, 0x90, 0xbf, 0x72, 0x9b, 0xfc, 0xf4, 0x12, 0x59, 0x99, 0x4b, 0xa1, 0x32, 0x14, 0xda, 0x97, + 0x96, 0x6f, 0x08, 0xc8, 0x5a, 0xfa, 0xda, 0xd6, 0x99, 0xc5, 0x47, 0x3f, 0x80, 0xac, 0x4c, 0x17, + 0x47, 0x22, 0xf9, 0xcd, 0xb7, 0xdd, 0xda, 0xd1, 0x7d, 0x78, 0x69, 0x01, 0x9c, 0x7a, 0xdf, 0x42, + 0xb7, 0xa0, 0x24, 0x90, 0xea, 0x15, 
0xe7, 0x78, 0xf2, 0x31, 0x25, 0xc6, 0xe9, 0xc4, 0x01, 0xb3, + 0x06, 0xbf, 0x0b, 0x90, 0x15, 0x3e, 0x31, 0xa5, 0x31, 0x5f, 0xe1, 0x4c, 0x69, 0x62, 0xef, 0x5a, + 0x38, 0x85, 0x3e, 0x04, 0x9b, 0x77, 0x36, 0xc8, 0x28, 0x2a, 0xc6, 0xe3, 0x4b, 0xed, 0x48, 0x12, + 0x6d, 0x6c, 0xfb, 0x51, 0xf8, 0x86, 0x74, 0x34, 0xd9, 0xcb, 0xea, 0xe5, 0xd5, 0xfd, 0x13, 0xe1, + 0xce, 0xb7, 0xe5, 0x63, 0x87, 0xee, 0xa9, 0xd0, 0x89, 0xf8, 0x56, 0x89, 0x16, 0xac, 0x56, 0x3f, + 0x68, 0x3a, 0x64, 0xb8, 0x06, 0x25, 0xa3, 0x9f, 0x31, 0xcd, 0xba, 0xbf, 0x19, 0x33, 0xcd, 0x3a, + 0xa1, 0x09, 0xc2, 0x29, 0x74, 0x15, 0x0a, 0xbc, 0x14, 0xf3, 0x8c, 0x84, 0x8e, 0x25, 0x2b, 0xae, + 0x91, 0x69, 0x6b, 0xc7, 0x27, 0x4f, 0x86, 0x8c, 0x7e, 0x0c, 0xc5, 0xab, 0x94, 0xa9, 0x70, 0x3d, + 0x9a, 0x8c, 0xf7, 0x09, 0x96, 0x8a, 0x9f, 0x19, 0x9c, 0x5a, 0xfa, 0x85, 0xfe, 0x27, 0x6a, 0xc5, + 0x61, 0x0e, 0xba, 0x0d, 0xb3, 0x42, 0xb0, 0xf0, 0xaf, 0xaa, 0x58, 0x00, 0xed, 0xfb, 0x5f, 0x2c, + 0x16, 0x40, 0xfb, 0xff, 0x1f, 0xc3, 0xa9, 0xf6, 0xfd, 0x67, 0xcf, 0xeb, 0xa9, 0x2f, 0x9e, 0xd7, + 0x53, 0x5f, 0x3d, 0xaf, 0x5b, 0xbf, 0xda, 0xad, 0x5b, 0x7f, 0xd8, 0xad, 0x5b, 0x4f, 0x77, 0xeb, + 0xd6, 0xb3, 0xdd, 0xba, 0xf5, 0xaf, 0xdd, 0xba, 0xf5, 0xef, 0xdd, 0x7a, 0xea, 0xab, 0xdd, 0xba, + 0xf5, 0xd9, 0x8b, 0x7a, 0xea, 0xd9, 0x8b, 0x7a, 0xea, 0x8b, 0x17, 0xf5, 0xd4, 0xfd, 0xb7, 0x5f, + 0xf6, 0xdc, 0xa4, 0x77, 0xdc, 0xc8, 0x89, 0xcf, 0x07, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x8b, + 0xe7, 0x73, 0xe7, 0x48, 0x1c, 0x00, 0x00, +} + +func (x Direction) String() string { + s, ok := Direction_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *StreamRatesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StreamRatesRequest) + if !ok { that2, ok := that.(StreamRatesRequest) if ok { that1 = &that2 @@ -2550,6 +2759,9 @@ func (this *StreamRate) Equal(that interface{}) bool { if this.Tenant != that1.Tenant { return false } + if this.Pushes != that1.Pushes { + return false + } return true } func (this *QueryRequest) Equal(that interface{}) bool { @@ -2602,6 +2814,13 @@ func (this *QueryRequest) Equal(that interface{}) bool { return false } } + if that1.Plan == nil { + if this.Plan != nil { + return false + } + } else if !this.Plan.Equal(*that1.Plan) { + return false + } return true } func (this *SampleQueryRequest) Equal(that interface{}) bool { @@ -2648,6 +2867,37 @@ func (this *SampleQueryRequest) Equal(that interface{}) bool { return false } } + if that1.Plan == nil { + if this.Plan != nil { + return false + } + } else if !this.Plan.Equal(*that1.Plan) { + return false + } + return true +} +func (this *Plan) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Plan) + if !ok { + that2, ok := that.(Plan) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Raw, that1.Raw) { + return false + } return true } func (this *Delete) Equal(that interface{}) bool { @@ -2940,6 +3190,13 @@ func (this *TailRequest) Equal(that interface{}) bool { if !this.Start.Equal(that1.Start) { return false } + if that1.Plan == nil { + if this.Plan != nil { + return false + } + } else if !this.Plan.Equal(*that1.Plan) { + return false + } return true } func (this *TailResponse) Equal(that interface{}) bool { @@ -3109,49 +3366,6 @@ func (this *DroppedStream) Equal(that interface{}) bool { } return true } -func (this *TimeSeriesChunk) Equal(that 
interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TimeSeriesChunk) - if !ok { - that2, ok := that.(TimeSeriesChunk) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.FromIngesterId != that1.FromIngesterId { - return false - } - if this.UserId != that1.UserId { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - if len(this.Chunks) != len(that1.Chunks) { - return false - } - for i := range this.Chunks { - if !this.Chunks[i].Equal(that1.Chunks[i]) { - return false - } - } - return true -} func (this *LabelPair) Equal(that interface{}) bool { if that == nil { return this == nil @@ -3230,27 +3444,6 @@ func (this *Chunk) Equal(that interface{}) bool { } return true } -func (this *TransferChunksResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TransferChunksResponse) - if !ok { - that2, ok := that.(TransferChunksResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} func (this *TailersCountRequest) Equal(that interface{}) bool { if that == nil { return this == nil @@ -3457,14 +3650,14 @@ func (this *LabelNamesForMetricNameRequest) Equal(that interface{}) bool { } return true } -func (this *GetChunkRefRequest) Equal(that interface{}) bool { +func (this *LineFilterExpression) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetChunkRefRequest) + that1, ok := that.(*LineFilterExpression) if !ok { - that2, ok := that.(GetChunkRefRequest) + that2, ok := that.(LineFilterExpression) if ok { that1 = &that2 } else { @@ -3476,25 +3669,22 @@ func (this *GetChunkRefRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.From.Equal(that1.From) { - return false - } - if !this.Through.Equal(that1.Through) { + if this.Operator != that1.Operator { return false } - if this.Matchers != that1.Matchers { + if this.Match != that1.Match { return false } return true } -func (this *GetChunkRefResponse) Equal(that interface{}) bool { +func (this *GetChunkRefRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetChunkRefResponse) + that1, ok := that.(*GetChunkRefRequest) if !ok { - that2, ok := that.(GetChunkRefResponse) + that2, ok := that.(GetChunkRefRequest) if ok { that1 = &that2 } else { @@ -3506,13 +3696,51 @@ func (this *GetChunkRefResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Refs) != len(that1.Refs) { + if !this.From.Equal(that1.From) { return false } - for i := range this.Refs { - if !this.Refs[i].Equal(that1.Refs[i]) { - return false - } + if !this.Through.Equal(that1.Through) { + return false + } + if this.Matchers != that1.Matchers { + return false + } + if len(this.Filters) != len(that1.Filters) { + return false + } + for i := range this.Filters { + if !this.Filters[i].Equal(that1.Filters[i]) { + return false + } + } + return true +} +func (this *GetChunkRefResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetChunkRefResponse) + if !ok { + that2, ok := that.(GetChunkRefResponse) + if ok { + that1 = &that2 + } else { + return false + } 
+ } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Refs) != len(that1.Refs) { + return false + } + for i := range this.Refs { + if !this.Refs[i].Equal(that1.Refs[i]) { + return false + } } return true } @@ -3791,6 +4019,112 @@ func (this *IndexStatsResponse) Equal(that interface{}) bool { } return true } +func (this *VolumeRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*VolumeRequest) + if !ok { + that2, ok := that.(VolumeRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.From.Equal(that1.From) { + return false + } + if !this.Through.Equal(that1.Through) { + return false + } + if this.Matchers != that1.Matchers { + return false + } + if this.Limit != that1.Limit { + return false + } + if this.Step != that1.Step { + return false + } + if len(this.TargetLabels) != len(that1.TargetLabels) { + return false + } + for i := range this.TargetLabels { + if this.TargetLabels[i] != that1.TargetLabels[i] { + return false + } + } + if this.AggregateBy != that1.AggregateBy { + return false + } + return true +} +func (this *VolumeResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*VolumeResponse) + if !ok { + that2, ok := that.(VolumeResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Volumes) != len(that1.Volumes) { + return false + } + for i := range this.Volumes { + if !this.Volumes[i].Equal(&that1.Volumes[i]) { + return false + } + } + if this.Limit != that1.Limit { + return false + } + return true +} +func (this *Volume) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Volume) + if !ok { + that2, ok := that.(Volume) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Volume != that1.Volume { + return false + } + return true +} func (this *StreamRatesRequest) GoString() string { if this == nil { return "nil" @@ -3816,12 +4150,13 @@ func (this *StreamRate) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&logproto.StreamRate{") s = append(s, "StreamHash: "+fmt.Sprintf("%#v", this.StreamHash)+",\n") s = append(s, "StreamHashNoShard: "+fmt.Sprintf("%#v", this.StreamHashNoShard)+",\n") s = append(s, "Rate: "+fmt.Sprintf("%#v", this.Rate)+",\n") s = append(s, "Tenant: "+fmt.Sprintf("%#v", this.Tenant)+",\n") + s = append(s, "Pushes: "+fmt.Sprintf("%#v", this.Pushes)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -3829,7 +4164,7 @@ func (this *QueryRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 11) + s := make([]string, 0, 12) s = append(s, "&logproto.QueryRequest{") s = append(s, "Selector: "+fmt.Sprintf("%#v", this.Selector)+",\n") s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") @@ -3840,6 +4175,7 @@ func (this *QueryRequest) GoString() string { if this.Deletes != nil { s = append(s, "Deletes: "+fmt.Sprintf("%#v", this.Deletes)+",\n") } + s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -3847,7 +4183,7 @@ func 
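The `GoString` methods that follow back fmt's `%#v` verb: each builds a `[]string` with capacity 4 + field count (hence the 8→9 and 11→12 bumps once `Pushes` and `Plan` were added) and joins it into valid Go syntax. A sketch with a hypothetical `Pair` type:

```go
package main

import (
	"fmt"
	"strings"
)

// Pair is a hypothetical two-field message illustrating the GoString
// convention in the diff: 4 fixed slots plus one per field, emitted as
// Go syntax so %#v output can be pasted back into source.
type Pair struct {
	Name  string
	Value string
}

func (this *Pair) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6) // 4 fixed slots + 2 fields
	s = append(s, "&main.Pair{")
	s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
	s = append(s, "}")
	return strings.Join(s, "")
}

func main() {
	p := &Pair{Name: "job", Value: "loki"}
	fmt.Printf("%#v\n", p) // fmt calls GoString via the fmt.GoStringer interface
}
```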
(this *SampleQueryRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 9) + s := make([]string, 0, 10) s = append(s, "&logproto.SampleQueryRequest{") s = append(s, "Selector: "+fmt.Sprintf("%#v", this.Selector)+",\n") s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") @@ -3856,6 +4192,17 @@ func (this *SampleQueryRequest) GoString() string { if this.Deletes != nil { s = append(s, "Deletes: "+fmt.Sprintf("%#v", this.Deletes)+",\n") } + s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Plan) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.Plan{") + s = append(s, "Raw: "+fmt.Sprintf("%#v", this.Raw)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -3962,12 +4309,13 @@ func (this *TailRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&logproto.TailRequest{") s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n") s = append(s, "DelayFor: "+fmt.Sprintf("%#v", this.DelayFor)+",\n") s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -4047,23 +4395,6 @@ func (this *DroppedStream) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *TimeSeriesChunk) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&logproto.TimeSeriesChunk{") - s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") - s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") - if this.Labels != nil { - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - } - if this.Chunks != nil { - s = append(s, "Chunks: "+fmt.Sprintf("%#v", this.Chunks)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} func (this *LabelPair) GoString() string { if this == nil { return "nil" @@ -4096,15 +4427,6 @@ func (this *Chunk) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *TransferChunksResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&logproto.TransferChunksResponse{") - s = append(s, "}") - return strings.Join(s, "") -} func (this *TailersCountRequest) GoString() string { if this == nil { return "nil" @@ -4186,15 +4508,29 @@ func (this *LabelNamesForMetricNameRequest) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *LineFilterExpression) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.LineFilterExpression{") + s = append(s, "Operator: "+fmt.Sprintf("%#v", this.Operator)+",\n") + s = append(s, "Match: "+fmt.Sprintf("%#v", this.Match)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *GetChunkRefRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 8) s = append(s, "&logproto.GetChunkRefRequest{") s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n") s = append(s, "Through: "+fmt.Sprintf("%#v", this.Through)+",\n") s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + if this.Filters != nil { + s = append(s, "Filters: "+fmt.Sprintf("%#v", this.Filters)+",\n") + } s = 
append(s, "}") return strings.Join(s, "") } @@ -4323,6 +4659,50 @@ func (this *IndexStatsResponse) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *VolumeRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&logproto.VolumeRequest{") + s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n") + s = append(s, "Through: "+fmt.Sprintf("%#v", this.Through)+",\n") + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") + s = append(s, "Step: "+fmt.Sprintf("%#v", this.Step)+",\n") + s = append(s, "TargetLabels: "+fmt.Sprintf("%#v", this.TargetLabels)+",\n") + s = append(s, "AggregateBy: "+fmt.Sprintf("%#v", this.AggregateBy)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *VolumeResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.VolumeResponse{") + if this.Volumes != nil { + vs := make([]*Volume, len(this.Volumes)) + for i := range vs { + vs[i] = &this.Volumes[i] + } + s = append(s, "Volumes: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Volume) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.Volume{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Volume: "+fmt.Sprintf("%#v", this.Volume)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func valueToGoStringLogproto(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -4354,6 +4734,9 @@ type QuerierClient interface { // Note: this MUST be the same as the variant defined in // indexgateway.proto on the IndexGateway service. GetStats(ctx context.Context, in *IndexStatsRequest, opts ...grpc.CallOption) (*IndexStatsResponse, error) + // Note: this MUST be the same as the variant defined in + // indexgateway.proto on the IndexGateway service. + GetVolume(ctx context.Context, in *VolumeRequest, opts ...grpc.CallOption) (*VolumeResponse, error) } type querierClient struct { @@ -4505,6 +4888,15 @@ func (c *querierClient) GetStats(ctx context.Context, in *IndexStatsRequest, opt return out, nil } +func (c *querierClient) GetVolume(ctx context.Context, in *VolumeRequest, opts ...grpc.CallOption) (*VolumeResponse, error) { + out := new(VolumeResponse) + err := c.cc.Invoke(ctx, "/logproto.Querier/GetVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // QuerierServer is the server API for Querier service. type QuerierServer interface { Query(*QueryRequest, Querier_QueryServer) error @@ -4517,6 +4909,9 @@ type QuerierServer interface { // Note: this MUST be the same as the variant defined in // indexgateway.proto on the IndexGateway service. GetStats(context.Context, *IndexStatsRequest) (*IndexStatsResponse, error) + // Note: this MUST be the same as the variant defined in + // indexgateway.proto on the IndexGateway service. + GetVolume(context.Context, *VolumeRequest) (*VolumeResponse, error) } // UnimplementedQuerierServer can be embedded to have forward compatible implementations. 
@@ -4547,6 +4942,9 @@ func (*UnimplementedQuerierServer) GetChunkIDs(ctx context.Context, req *GetChun func (*UnimplementedQuerierServer) GetStats(ctx context.Context, req *IndexStatsRequest) (*IndexStatsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetStats not implemented") } +func (*UnimplementedQuerierServer) GetVolume(ctx context.Context, req *VolumeRequest) (*VolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetVolume not implemented") +} func RegisterQuerierServer(s *grpc.Server, srv QuerierServer) { s.RegisterService(&_Querier_serviceDesc, srv) @@ -4705,6 +5103,24 @@ func _Querier_GetStats_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } +func _Querier_GetVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QuerierServer).GetVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/logproto.Querier/GetVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QuerierServer).GetVolume(ctx, req.(*VolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Querier_serviceDesc = grpc.ServiceDesc{ ServiceName: "logproto.Querier", HandlerType: (*QuerierServer)(nil), @@ -4729,6 +5145,10 @@ var _Querier_serviceDesc = grpc.ServiceDesc{ MethodName: "GetStats", Handler: _Querier_GetStats_Handler, }, + { + MethodName: "GetVolume", + Handler: _Querier_GetVolume_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -4750,160 +5170,54 @@ var _Querier_serviceDesc = grpc.ServiceDesc{ Metadata: "pkg/logproto/logproto.proto", } -// IngesterClient is the client API for Ingester service. +// StreamDataClient is the client API for StreamData service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type IngesterClient interface { - TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) +type StreamDataClient interface { + GetStreamRates(ctx context.Context, in *StreamRatesRequest, opts ...grpc.CallOption) (*StreamRatesResponse, error) } -type ingesterClient struct { +type streamDataClient struct { cc *grpc.ClientConn } -func NewIngesterClient(cc *grpc.ClientConn) IngesterClient { - return &ingesterClient{cc} +func NewStreamDataClient(cc *grpc.ClientConn) StreamDataClient { + return &streamDataClient{cc} } -func (c *ingesterClient) TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[0], "/logproto.Ingester/TransferChunks", opts...) +func (c *streamDataClient) GetStreamRates(ctx context.Context, in *StreamRatesRequest, opts ...grpc.CallOption) (*StreamRatesResponse, error) { + out := new(StreamRatesResponse) + err := c.cc.Invoke(ctx, "/logproto.StreamData/GetStreamRates", in, out, opts...) if err != nil { return nil, err } - x := &ingesterTransferChunksClient{stream} - return x, nil + return out, nil } -type Ingester_TransferChunksClient interface { - Send(*TimeSeriesChunk) error - CloseAndRecv() (*TransferChunksResponse, error) - grpc.ClientStream +// StreamDataServer is the server API for StreamData service. 
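On the server side, embedding `UnimplementedQuerierServer` (which the generated code provides exactly for this) keeps an implementation forward compatible: only `GetVolume` is overridden here, and everything else answers `codes.Unimplemented`. A sketch under the same import-path assumption, with made-up response values:

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	"github.com/grafana/loki/pkg/logproto" // assumed import path
)

// volumeServer overrides only GetVolume; the embedded
// UnimplementedQuerierServer supplies stubs for every other Querier
// method, which is the forward-compatibility pattern the diff relies on.
type volumeServer struct {
	logproto.UnimplementedQuerierServer
}

func (s *volumeServer) GetVolume(ctx context.Context, req *logproto.VolumeRequest) (*logproto.VolumeResponse, error) {
	// Hypothetical static answer; a real implementation would consult the index.
	return &logproto.VolumeResponse{
		Volumes: []logproto.Volume{{Name: `{job="ingester"}`, Volume: 1 << 20}},
		Limit:   req.Limit,
	}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":9095")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	logproto.RegisterQuerierServer(srv, &volumeServer{})
	log.Fatal(srv.Serve(lis))
}
```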
+type StreamDataServer interface { + GetStreamRates(context.Context, *StreamRatesRequest) (*StreamRatesResponse, error) } -type ingesterTransferChunksClient struct { - grpc.ClientStream +// UnimplementedStreamDataServer can be embedded to have forward compatible implementations. +type UnimplementedStreamDataServer struct { } -func (x *ingesterTransferChunksClient) Send(m *TimeSeriesChunk) error { - return x.ClientStream.SendMsg(m) +func (*UnimplementedStreamDataServer) GetStreamRates(ctx context.Context, req *StreamRatesRequest) (*StreamRatesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetStreamRates not implemented") } -func (x *ingesterTransferChunksClient) CloseAndRecv() (*TransferChunksResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { +func RegisterStreamDataServer(s *grpc.Server, srv StreamDataServer) { + s.RegisterService(&_StreamData_serviceDesc, srv) +} + +func _StreamData_GetStreamRates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StreamRatesRequest) + if err := dec(in); err != nil { return nil, err } - m := new(TransferChunksResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// IngesterServer is the server API for Ingester service. -type IngesterServer interface { - TransferChunks(Ingester_TransferChunksServer) error -} - -// UnimplementedIngesterServer can be embedded to have forward compatible implementations. -type UnimplementedIngesterServer struct { -} - -func (*UnimplementedIngesterServer) TransferChunks(srv Ingester_TransferChunksServer) error { - return status.Errorf(codes.Unimplemented, "method TransferChunks not implemented") -} - -func RegisterIngesterServer(s *grpc.Server, srv IngesterServer) { - s.RegisterService(&_Ingester_serviceDesc, srv) -} - -func _Ingester_TransferChunks_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(IngesterServer).TransferChunks(&ingesterTransferChunksServer{stream}) -} - -type Ingester_TransferChunksServer interface { - SendAndClose(*TransferChunksResponse) error - Recv() (*TimeSeriesChunk, error) - grpc.ServerStream -} - -type ingesterTransferChunksServer struct { - grpc.ServerStream -} - -func (x *ingesterTransferChunksServer) SendAndClose(m *TransferChunksResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *ingesterTransferChunksServer) Recv() (*TimeSeriesChunk, error) { - m := new(TimeSeriesChunk) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Ingester_serviceDesc = grpc.ServiceDesc{ - ServiceName: "logproto.Ingester", - HandlerType: (*IngesterServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "TransferChunks", - Handler: _Ingester_TransferChunks_Handler, - ClientStreams: true, - }, - }, - Metadata: "pkg/logproto/logproto.proto", -} - -// StreamDataClient is the client API for StreamData service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type StreamDataClient interface { - GetStreamRates(ctx context.Context, in *StreamRatesRequest, opts ...grpc.CallOption) (*StreamRatesResponse, error) -} - -type streamDataClient struct { - cc *grpc.ClientConn -} - -func NewStreamDataClient(cc *grpc.ClientConn) StreamDataClient { - return &streamDataClient{cc} -} - -func (c *streamDataClient) GetStreamRates(ctx context.Context, in *StreamRatesRequest, opts ...grpc.CallOption) (*StreamRatesResponse, error) { - out := new(StreamRatesResponse) - err := c.cc.Invoke(ctx, "/logproto.StreamData/GetStreamRates", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// StreamDataServer is the server API for StreamData service. -type StreamDataServer interface { - GetStreamRates(context.Context, *StreamRatesRequest) (*StreamRatesResponse, error) -} - -// UnimplementedStreamDataServer can be embedded to have forward compatible implementations. -type UnimplementedStreamDataServer struct { -} - -func (*UnimplementedStreamDataServer) GetStreamRates(ctx context.Context, req *StreamRatesRequest) (*StreamRatesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetStreamRates not implemented") -} - -func RegisterStreamDataServer(s *grpc.Server, srv StreamDataServer) { - s.RegisterService(&_StreamData_serviceDesc, srv) -} - -func _StreamData_GetStreamRates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StreamRatesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StreamDataServer).GetStreamRates(ctx, in) + if interceptor == nil { + return srv.(StreamDataServer).GetStreamRates(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, @@ -5008,6 +5322,11 @@ func (m *StreamRate) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Pushes != 0 { + i = encodeVarintLogproto(dAtA, i, uint64(m.Pushes)) + i-- + dAtA[i] = 0x28 + } if len(m.Tenant) > 0 { i -= len(m.Tenant) copy(dAtA[i:], m.Tenant) @@ -5053,6 +5372,18 @@ func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Plan != nil { + { + size := m.Plan.Size() + i -= size + if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLogproto(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } if len(m.Deletes) > 0 { for iNdEx := len(m.Deletes) - 1; iNdEx >= 0; iNdEx-- { { @@ -5081,21 +5412,21 @@ func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintLogproto(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x22 - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) if err2 != nil { return 0, err2 } i -= n2 i = encodeVarintLogproto(dAtA, i, uint64(n2)) i-- + dAtA[i] = 0x22 + n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintLogproto(dAtA, i, uint64(n3)) + i-- dAtA[i] = 0x1a if m.Limit != 0 { i = encodeVarintLogproto(dAtA, i, 
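The hard-coded bytes such as `0x28` and `0x4a` in the marshalers below are protobuf field keys, computed as `(fieldNumber << 3) | wireType`. A small check of the keys this diff introduces:

```go
package main

import "fmt"

// Wire types from the protobuf encoding spec.
const (
	wireVarint = 0
	wireBytes  = 2
)

// key computes the single-byte field key the generated marshalers
// hard-code, e.g. dAtA[i] = 0x28 for StreamRate.Pushes.
func key(fieldNum, wireType int) byte {
	return byte(fieldNum<<3 | wireType)
}

func main() {
	fmt.Printf("Pushes (field 5, varint): 0x%x\n", key(5, wireVarint)) // 0x28
	fmt.Printf("Plan   (field 9, bytes):  0x%x\n", key(9, wireBytes))  // 0x4a
	fmt.Printf("End    (field 4, bytes):  0x%x\n", key(4, wireBytes))  // 0x22
}
```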
uint64(m.Limit)) @@ -5132,6 +5463,18 @@ func (m *SampleQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Plan != nil { + { + size := m.Plan.Size() + i -= size + if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLogproto(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } if len(m.Deletes) > 0 { for iNdEx := len(m.Deletes) - 1; iNdEx >= 0; iNdEx-- { { @@ -5155,20 +5498,20 @@ func (m *SampleQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x22 } } - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) - if err3 != nil { - return 0, err3 + n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) + if err5 != nil { + return 0, err5 } - i -= n3 - i = encodeVarintLogproto(dAtA, i, uint64(n3)) + i -= n5 + i = encodeVarintLogproto(dAtA, i, uint64(n5)) i-- dAtA[i] = 0x1a - n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) - if err4 != nil { - return 0, err4 + n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err6 != nil { + return 0, err6 } - i -= n4 - i = encodeVarintLogproto(dAtA, i, uint64(n4)) + i -= n6 + i = encodeVarintLogproto(dAtA, i, uint64(n6)) i-- dAtA[i] = 0x12 if len(m.Selector) > 0 { @@ -5181,6 +5524,36 @@ func (m *SampleQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Plan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Plan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Plan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Raw) > 0 { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Delete) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5343,22 +5716,22 @@ func (m *LabelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x2a } if m.End != nil { - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.End):]) - if err7 != nil { - return 0, err7 + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.End):]) + if err9 != nil { + return 0, err9 } - i -= n7 - i = encodeVarintLogproto(dAtA, i, uint64(n7)) + i -= n9 + i = encodeVarintLogproto(dAtA, i, uint64(n9)) i-- dAtA[i] = 0x22 } if m.Start != nil { - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start):]) - if err8 != nil { - return 0, err8 + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start):]) + if err10 != nil { + return 0, err10 } - i -= n8 - i = encodeVarintLogproto(dAtA, i, uint64(n8)) + i -= n10 + i = encodeVarintLogproto(dAtA, i, uint64(n10)) i-- dAtA[i] = 0x1a } @@ -5556,12 +5929,24 @@ func (m *TailRequest) 
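The new `Plan` message is nothing but a length-delimited `Raw` bytes field (field 1, key `0x0a`), so its wire form can be reproduced by hand. A sketch using only the standard library; the payload string is an arbitrary example:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodePlan reproduces, by hand, what the generated Plan.Marshal in the
// diff emits: key 0x0a (field 1, wire type 2), a varint length, then the
// raw bytes of the serialized query plan.
func encodePlan(raw []byte) []byte {
	var buf bytes.Buffer
	buf.WriteByte(0x0a)
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], uint64(len(raw)))
	buf.Write(tmp[:n])
	buf.Write(raw)
	return buf.Bytes()
}

func main() {
	msg := encodePlan([]byte(`{app="loki"} |= "error"`))
	fmt.Printf("% x\n", msg)
}
```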
MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) - if err9 != nil { - return 0, err9 + if m.Plan != nil { + { + size := m.Plan.Size() + i -= size + if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLogproto(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err12 != nil { + return 0, err12 } - i -= n9 - i = encodeVarintLogproto(dAtA, i, uint64(n9)) + i -= n12 + i = encodeVarintLogproto(dAtA, i, uint64(n12)) i-- dAtA[i] = 0x2a if m.Limit != 0 { @@ -5671,20 +6056,20 @@ func (m *SeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x1a } } - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) - if err11 != nil { - return 0, err11 + n14, err14 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) + if err14 != nil { + return 0, err14 } - i -= n11 - i = encodeVarintLogproto(dAtA, i, uint64(n11)) + i -= n14 + i = encodeVarintLogproto(dAtA, i, uint64(n14)) i-- dAtA[i] = 0x12 - n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) - if err12 != nil { - return 0, err12 + n15, err15 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err15 != nil { + return 0, err15 } - i -= n12 - i = encodeVarintLogproto(dAtA, i, uint64(n12)) + i -= n15 + i = encodeVarintLogproto(dAtA, i, uint64(n15)) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -5796,90 +6181,25 @@ func (m *DroppedStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x1a } - n13, err13 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.To, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.To):]) - if err13 != nil { - return 0, err13 + n16, err16 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.To, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.To):]) + if err16 != nil { + return 0, err16 } - i -= n13 - i = encodeVarintLogproto(dAtA, i, uint64(n13)) + i -= n16 + i = encodeVarintLogproto(dAtA, i, uint64(n16)) i-- dAtA[i] = 0x12 - n14, err14 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.From, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.From):]) - if err14 != nil { - return 0, err14 + n17, err17 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.From, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.From):]) + if err17 != nil { + return 0, err17 } - i -= n14 - i = encodeVarintLogproto(dAtA, i, uint64(n14)) + i -= n17 + i = encodeVarintLogproto(dAtA, i, uint64(n17)) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *TimeSeriesChunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TimeSeriesChunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TimeSeriesChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) 
- 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogproto(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogproto(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.UserId) > 0 { - i -= len(m.UserId) - copy(dAtA[i:], m.UserId) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.UserId))) - i-- - dAtA[i] = 0x12 - } - if len(m.FromIngesterId) > 0 { - i -= len(m.FromIngesterId) - copy(dAtA[i:], m.FromIngesterId) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.FromIngesterId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *LabelPair) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5984,29 +6304,6 @@ func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *TransferChunksResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TransferChunksResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TransferChunksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - func (m *TailersCountRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6078,20 +6375,20 @@ func (m *GetChunkIDsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - n15, err15 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) - if err15 != nil { - return 0, err15 + n18, err18 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) + if err18 != nil { + return 0, err18 } - i -= n15 - i = encodeVarintLogproto(dAtA, i, uint64(n15)) + i -= n18 + i = encodeVarintLogproto(dAtA, i, uint64(n18)) i-- dAtA[i] = 0x1a - n16, err16 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) - if err16 != nil { - return 0, err16 + n19, err19 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err19 != nil { + return 0, err19 } - i -= n16 - i = encodeVarintLogproto(dAtA, i, uint64(n16)) + i -= n19 + i = encodeVarintLogproto(dAtA, i, uint64(n19)) i-- dAtA[i] = 0x12 if len(m.Matchers) > 0 { @@ -6280,7 +6577,7 @@ func (m *LabelNamesForMetricNameRequest) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *GetChunkRefRequest) Marshal() (dAtA []byte, err error) { +func (m *LineFilterExpression) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6290,19 +6587,68 @@ func (m *GetChunkRefRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetChunkRefRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *LineFilterExpression) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m 
*GetChunkRefRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *LineFilterExpression) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Matchers) > 0 { - i -= len(m.Matchers) - copy(dAtA[i:], m.Matchers) + if len(m.Match) > 0 { + i -= len(m.Match) + copy(dAtA[i:], m.Match) + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Match))) + i-- + dAtA[i] = 0x12 + } + if m.Operator != 0 { + i = encodeVarintLogproto(dAtA, i, uint64(m.Operator)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *GetChunkRefRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetChunkRefRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetChunkRefRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Filters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLogproto(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Matchers) > 0 { + i -= len(m.Matchers) + copy(dAtA[i:], m.Matchers) i = encodeVarintLogproto(dAtA, i, uint64(len(m.Matchers))) i-- dAtA[i] = 0x1a @@ -6730,6 +7076,149 @@ func (m *IndexStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *VolumeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AggregateBy) > 0 { + i -= len(m.AggregateBy) + copy(dAtA[i:], m.AggregateBy) + i = encodeVarintLogproto(dAtA, i, uint64(len(m.AggregateBy))) + i-- + dAtA[i] = 0x3a + } + if len(m.TargetLabels) > 0 { + for iNdEx := len(m.TargetLabels) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TargetLabels[iNdEx]) + copy(dAtA[i:], m.TargetLabels[iNdEx]) + i = encodeVarintLogproto(dAtA, i, uint64(len(m.TargetLabels[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if m.Step != 0 { + i = encodeVarintLogproto(dAtA, i, uint64(m.Step)) + i-- + dAtA[i] = 0x28 + } + if m.Limit != 0 { + i = encodeVarintLogproto(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x20 + } + if len(m.Matchers) > 0 { + i -= len(m.Matchers) + copy(dAtA[i:], m.Matchers) + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Matchers))) + i-- + dAtA[i] = 0x1a + } + if m.Through != 0 { + i = encodeVarintLogproto(dAtA, i, uint64(m.Through)) + i-- + dAtA[i] = 0x10 + } + if m.From != 0 { + i = encodeVarintLogproto(dAtA, i, uint64(m.From)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *VolumeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeResponse) 
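These `MarshalToSizedBuffer` bodies all write back-to-front: `i` starts at `len(dAtA)` and counts down, so each length prefix can be written after its payload without a second sizing pass. A standalone sketch of the idiom for a single string field (the helper names are mine, not gogo's):

```go
package main

import "fmt"

// putVarintBackwards writes v so that it ends just before index i and
// returns the new i. It mirrors encodeVarintLogproto: the generated code
// fills dAtA from the back so length prefixes follow their payloads.
func putVarintBackwards(dAtA []byte, i int, v uint64) int {
	i -= sov(v)
	base := i
	for v >= 1<<7 {
		dAtA[base] = byte(v&0x7f | 0x80)
		v >>= 7
		base++
	}
	dAtA[base] = byte(v)
	return i
}

// sov is the byte length of v as a varint (sovLogproto's role).
func sov(v uint64) int {
	n := 0
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

func main() {
	// Encode field 3 (Matchers, wire type 2) back-to-front, as the
	// generated GetChunkRefRequest.MarshalToSizedBuffer does.
	matchers := `{job="loki"}`
	buf := make([]byte, 64)
	i := len(buf)
	i -= len(matchers)
	copy(buf[i:], matchers)
	i = putVarintBackwards(buf, i, uint64(len(matchers)))
	i--
	buf[i] = 0x1a // (3<<3)|2
	fmt.Printf("% x\n", buf[i:])
}
```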
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Limit != 0 { + i = encodeVarintLogproto(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x10 + } + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLogproto(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Volume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Volume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Volume != 0 { + i = encodeVarintLogproto(dAtA, i, uint64(m.Volume)) + i-- + dAtA[i] = 0x18 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintLogproto(dAtA []byte, offset int, v uint64) int { offset -= sovLogproto(v) base := offset @@ -6784,6 +7273,9 @@ func (m *StreamRate) Size() (n int) { if l > 0 { n += 1 + l + sovLogproto(uint64(l)) } + if m.Pushes != 0 { + n += 1 + sovLogproto(uint64(m.Pushes)) + } return n } @@ -6819,6 +7311,10 @@ func (m *QueryRequest) Size() (n int) { n += 1 + l + sovLogproto(uint64(l)) } } + if m.Plan != nil { + l = m.Plan.Size() + n += 1 + l + sovLogproto(uint64(l)) + } return n } @@ -6848,6 +7344,23 @@ func (m *SampleQueryRequest) Size() (n int) { n += 1 + l + sovLogproto(uint64(l)) } } + if m.Plan != nil { + l = m.Plan.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + return n +} + +func (m *Plan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Raw) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } return n } @@ -7020,6 +7533,10 @@ func (m *TailRequest) Size() (n int) { } l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) n += 1 + l + sovLogproto(uint64(l)) + if m.Plan != nil { + l = m.Plan.Size() + n += 1 + l + sovLogproto(uint64(l)) + } return n } @@ -7116,35 +7633,6 @@ func (m *DroppedStream) Size() (n int) { return n } -func (m *TimeSeriesChunk) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.FromIngesterId) - if l > 0 { - n += 1 + l + sovLogproto(uint64(l)) - } - l = len(m.UserId) - if l > 0 { - n += 1 + l + sovLogproto(uint64(l)) - } - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovLogproto(uint64(l)) - } - } - if len(m.Chunks) > 0 { - for _, e := range m.Chunks { - l = e.Size() - n += 1 + l + sovLogproto(uint64(l)) - } - } - return n -} - func (m *LabelPair) Size() (n int) { if m == nil { return 0 @@ -7192,15 +7680,6 @@ func (m *Chunk) Size() (n int) { return n } -func (m *TransferChunksResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - func (m *TailersCountRequest) Size() (n int) { if m == nil { return 0 @@ -7325,6 +7804,22 @@ func (m *LabelNamesForMetricNameRequest) Size() (n int) { return n } +func (m *LineFilterExpression) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Operator != 0 { + n += 1 + sovLogproto(uint64(m.Operator)) + } + l = len(m.Match) + if l > 0 { 
+ n += 1 + l + sovLogproto(uint64(l)) + } + return n +} + func (m *GetChunkRefRequest) Size() (n int) { if m == nil { return 0 @@ -7341,6 +7836,12 @@ func (m *GetChunkRefRequest) Size() (n int) { if l > 0 { n += 1 + l + sovLogproto(uint64(l)) } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } return n } @@ -7528,32 +8029,101 @@ func (m *IndexStatsResponse) Size() (n int) { return n } -func sovLogproto(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozLogproto(x uint64) (n int) { - return sovLogproto(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *StreamRatesRequest) String() string { - if this == nil { - return "nil" +func (m *VolumeRequest) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&StreamRatesRequest{`, - `}`, - }, "") - return s -} -func (this *StreamRatesResponse) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.From != 0 { + n += 1 + sovLogproto(uint64(m.From)) } - repeatedStringForStreamRates := "[]*StreamRate{" - for _, f := range this.StreamRates { - repeatedStringForStreamRates += strings.Replace(f.String(), "StreamRate", "StreamRate", 1) + "," + if m.Through != 0 { + n += 1 + sovLogproto(uint64(m.Through)) } - repeatedStringForStreamRates += "}" - s := strings.Join([]string{`&StreamRatesResponse{`, - `StreamRates:` + repeatedStringForStreamRates + `,`, + l = len(m.Matchers) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovLogproto(uint64(m.Limit)) + } + if m.Step != 0 { + n += 1 + sovLogproto(uint64(m.Step)) + } + if len(m.TargetLabels) > 0 { + for _, s := range m.TargetLabels { + l = len(s) + n += 1 + l + sovLogproto(uint64(l)) + } + } + l = len(m.AggregateBy) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + return n +} + +func (m *VolumeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } + if m.Limit != 0 { + n += 1 + sovLogproto(uint64(m.Limit)) + } + return n +} + +func (m *Volume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + if m.Volume != 0 { + n += 1 + sovLogproto(uint64(m.Volume)) + } + return n +} + +func sovLogproto(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozLogproto(x uint64) (n int) { + return sovLogproto(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StreamRatesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StreamRatesRequest{`, + `}`, + }, "") + return s +} +func (this *StreamRatesResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForStreamRates := "[]*StreamRate{" + for _, f := range this.StreamRates { + repeatedStringForStreamRates += strings.Replace(f.String(), "StreamRate", "StreamRate", 1) + "," + } + repeatedStringForStreamRates += "}" + s := strings.Join([]string{`&StreamRatesResponse{`, + `StreamRates:` + repeatedStringForStreamRates + `,`, `}`, }, "") return s @@ -7567,6 +8137,7 @@ func (this *StreamRate) String() string { `StreamHashNoShard:` + fmt.Sprintf("%v", this.StreamHashNoShard) + `,`, `Rate:` + fmt.Sprintf("%v", this.Rate) + `,`, `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`, + `Pushes:` + fmt.Sprintf("%v", this.Pushes) + `,`, `}`, }, "") return s @@ -7588,6 +8159,7 @@ func 
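`sovLogproto` and `sozLogproto`, shown in this hunk, are the sizing half of that scheme: `sov` is the varint width of `x` (the `|1` guards against `Len64(0)`), and `soz` zig-zag encodes first so that small negative `sint` values stay short on the wire. A quick check:

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov and soz match sovLogproto / sozLogproto from the diff.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func soz(x uint64) int { return sov((x << 1) ^ uint64(int64(x)>>63)) }

func main() {
	fmt.Println(sov(0), sov(127), sov(128), sov(1<<63)) // 1 1 2 10
	// -1 as a plain varint takes 10 bytes; zig-zagged it takes 1.
	fmt.Println(sov(uint64(int64(-1))), soz(uint64(int64(-1)))) // 10 1
}
```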
(this *QueryRequest) String() string { `Direction:` + fmt.Sprintf("%v", this.Direction) + `,`, `Shards:` + fmt.Sprintf("%v", this.Shards) + `,`, `Deletes:` + repeatedStringForDeletes + `,`, + `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`, `}`, }, "") return s @@ -7607,6 +8179,17 @@ func (this *SampleQueryRequest) String() string { `End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `Shards:` + fmt.Sprintf("%v", this.Shards) + `,`, `Deletes:` + repeatedStringForDeletes + `,`, + `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`, + `}`, + }, "") + return s +} +func (this *Plan) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Plan{`, + `Raw:` + fmt.Sprintf("%v", this.Raw) + `,`, `}`, }, "") return s @@ -7718,6 +8301,7 @@ func (this *TailRequest) String() string { `DelayFor:` + fmt.Sprintf("%v", this.DelayFor) + `,`, `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`, `}`, }, "") return s @@ -7798,29 +8382,6 @@ func (this *DroppedStream) String() string { }, "") return s } -func (this *TimeSeriesChunk) String() string { - if this == nil { - return "nil" - } - repeatedStringForLabels := "[]*LabelPair{" - for _, f := range this.Labels { - repeatedStringForLabels += strings.Replace(f.String(), "LabelPair", "LabelPair", 1) + "," - } - repeatedStringForLabels += "}" - repeatedStringForChunks := "[]*Chunk{" - for _, f := range this.Chunks { - repeatedStringForChunks += strings.Replace(f.String(), "Chunk", "Chunk", 1) + "," - } - repeatedStringForChunks += "}" - s := strings.Join([]string{`&TimeSeriesChunk{`, - `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, - `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Labels:` + repeatedStringForLabels + `,`, - `Chunks:` + repeatedStringForChunks + `,`, - `}`, - }, "") - return s -} func (this *LabelPair) String() string { if this == nil { return "nil" @@ -7853,15 +8414,6 @@ func (this *Chunk) String() string { }, "") return s } -func (this *TransferChunksResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TransferChunksResponse{`, - `}`, - }, "") - return s -} func (this *TailersCountRequest) String() string { if this == nil { return "nil" @@ -7943,14 +8495,31 @@ func (this *LabelNamesForMetricNameRequest) String() string { }, "") return s } +func (this *LineFilterExpression) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LineFilterExpression{`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Match:` + fmt.Sprintf("%v", this.Match) + `,`, + `}`, + }, "") + return s +} func (this *GetChunkRefRequest) String() string { if this == nil { return "nil" } + repeatedStringForFilters := "[]*LineFilterExpression{" + for _, f := range this.Filters { + repeatedStringForFilters += strings.Replace(f.String(), "LineFilterExpression", "LineFilterExpression", 1) + "," + } + repeatedStringForFilters += "}" s := strings.Join([]string{`&GetChunkRefRequest{`, `From:` + fmt.Sprintf("%v", this.From) + `,`, `Through:` + fmt.Sprintf("%v", this.Through) + `,`, `Matchers:` + fmt.Sprintf("%v", this.Matchers) + `,`, + `Filters:` + repeatedStringForFilters + `,`, `}`, }, "") return s @@ -8088,6 +8657,49 @@ func (this *IndexStatsResponse) String() string { }, "") return s } +func (this *VolumeRequest) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeRequest{`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `Through:` + fmt.Sprintf("%v", this.Through) + `,`, + `Matchers:` + fmt.Sprintf("%v", this.Matchers) + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `Step:` + fmt.Sprintf("%v", this.Step) + `,`, + `TargetLabels:` + fmt.Sprintf("%v", this.TargetLabels) + `,`, + `AggregateBy:` + fmt.Sprintf("%v", this.AggregateBy) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "Volume", "Volume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumes += "}" + s := strings.Join([]string{`&VolumeResponse{`, + `Volumes:` + repeatedStringForVolumes + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `}`, + }, "") + return s +} +func (this *Volume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, + `}`, + }, "") + return s +} func valueToStringLogproto(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -8354,6 +8966,25 @@ func (m *StreamRate) Unmarshal(dAtA []byte) error { } m.Tenant = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pushes", wireType) + } + m.Pushes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pushes |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) @@ -8609,6 +9240,42 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plan == nil { + m.Plan = &github_com_grafana_loki_pkg_querier_plan.QueryPlan{} + } + if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) @@ -8826,6 +9493,42 @@ func (m *SampleQueryRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plan == nil 
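Every varint field in these `Unmarshal` bodies inlines the same decode loop, with explicit overflow and truncation checks instead of `binary.Uvarint`. Factored out as a standalone function and run over a hand-built `StreamRate.Pushes` field:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

var errIntOverflow = errors.New("integer overflow") // ErrIntOverflowLogproto's role

// readUvarint is the decode loop the generated Unmarshal inlines for
// every varint field (e.g. StreamRate.Pushes, case 5 in the diff).
func readUvarint(dAtA []byte, iNdEx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, iNdEx, nil
}

func main() {
	// 0x28 is the key for field 5/varint; 0x96 0x01 is the varint 150.
	data := []byte{0x28, 0x96, 0x01}
	key, i, _ := readUvarint(data, 0)
	val, _, _ := readUvarint(data, i)
	fmt.Printf("field=%d wiretype=%d value=%d\n", key>>3, key&0x7, val)
}
```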
{ + m.Plan = &github_com_grafana_loki_pkg_querier_plan.QueryPlan{} + } + if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) @@ -8850,7 +9553,7 @@ func (m *SampleQueryRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *Delete) Unmarshal(dAtA []byte) error { +func (m *Plan) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8873,17 +9576,17 @@ func (m *Delete) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Delete: wiretype end group for non-group") + return fmt.Errorf("proto: Plan: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Delete: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Plan: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLogproto @@ -8893,52 +9596,139 @@ func (m *Delete) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthLogproto } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthLogproto } if postIndex > l { return io.ErrUnexpectedEOF } - m.Selector = string(dAtA[iNdEx:postIndex]) + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Raw == nil { + m.Raw = []byte{} + } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err } - m.Start = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogproto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Start |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthLogproto } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto } - m.End = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogproto - } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Delete) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Delete: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Delete: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + m.End = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } if iNdEx >= l { return io.ErrUnexpectedEOF } @@ -9962,6 +10752,42 @@ func (m *TailRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plan == nil { + m.Plan = &github_com_grafana_loki_pkg_querier_plan.QueryPlan{} + } + if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) @@ -10710,7 +11536,7 @@ func (m *DroppedStream) Unmarshal(dAtA []byte) error { } return nil } -func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { +func (m *LabelPair) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10733,15 +11559,15 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TimeSeriesChunk: wiretype end group for non-group") + return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeriesChunk: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10769,11 +11595,11 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FromIngesterId = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10801,75 +11627,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.UserId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogproto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogproto - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogproto - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, &LabelPair{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogproto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogproto - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogproto - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Chunks = append(m.Chunks, &Chunk{}) - if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Value = string(dAtA[iNdEx:postIndex]) 
iNdEx = postIndex default: iNdEx = preIndex @@ -10895,7 +11653,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelPair) Unmarshal(dAtA []byte) error { +func (m *LegacyLabelPair) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10918,17 +11676,17 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") + return fmt.Errorf("proto: LegacyLabelPair: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LegacyLabelPair: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLogproto @@ -10938,29 +11696,31 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthLogproto } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthLogproto } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) + if m.Name == nil { + m.Name = []byte{} + } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLogproto @@ -10970,23 +11730,25 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthLogproto } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthLogproto } if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex @@ -11012,7 +11774,7 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error { } return nil } -func (m *LegacyLabelPair) Unmarshal(dAtA []byte) error { +func (m *Chunk) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11035,15 +11797,15 @@ func (m *LegacyLabelPair) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LegacyLabelPair: wiretype end group for non-group") + return fmt.Errorf("proto: Chunk: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LegacyLabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -11070,185 +11832,11 @@ func (m *LegacyLabelPair) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) - if m.Name == nil { - m.Name = []byte{} + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogproto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthLogproto - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthLogproto - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
- if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogproto(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthLogproto - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthLogproto - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Chunk) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogproto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Chunk: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogproto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthLogproto - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthLogproto - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogproto(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthLogproto - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthLogproto - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogproto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TransferChunksResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TransferChunksResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { default: iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) @@ -12105,7 +12693,7 @@ func (m *LabelNamesForMetricNameRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetChunkRefRequest) Unmarshal(dAtA []byte) error { +func (m *LineFilterExpression) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12128,17 +12716,17 @@ func (m *GetChunkRefRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetChunkRefRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LineFilterExpression: wiretype end group 
for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetChunkRefRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LineFilterExpression: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) } - m.From = 0 + m.Operator = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLogproto @@ -12148,33 +12736,14 @@ func (m *GetChunkRefRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.From |= github_com_prometheus_common_model.Time(b&0x7F) << shift + m.Operator |= int64(b&0x7F) << shift if b < 0x80 { break } } case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Through", wireType) - } - m.Through = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogproto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Through |= github_com_prometheus_common_model.Time(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -12202,7 +12771,7 @@ func (m *GetChunkRefRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Matchers = string(dAtA[iNdEx:postIndex]) + m.Match = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -12228,7 +12797,7 @@ func (m *GetChunkRefRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetChunkRefResponse) Unmarshal(dAtA []byte) error { +func (m *GetChunkRefRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12251,17 +12820,55 @@ func (m *GetChunkRefResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetChunkRefResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetChunkRefRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetChunkRefResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetChunkRefRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + m.From = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.From |= github_com_prometheus_common_model.Time(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Through", wireType) + } + m.Through = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Through |= github_com_prometheus_common_model.Time(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) } - var msglen int + var stringLen 
uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLogproto @@ -12271,28 +12878,147 @@ func (m *GetChunkRefResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthLogproto } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthLogproto } if postIndex > l { return io.ErrUnexpectedEOF } - m.Refs = append(m.Refs, &ChunkRef{}) - if err := m.Refs[len(m.Refs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Matchers = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, &LineFilterExpression{}) + if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetChunkRefResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetChunkRefResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetChunkRefResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Refs = append(m.Refs, &ChunkRef{}) + if err := m.Refs[len(m.Refs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) if err != nil { return err @@ -13410,6 +14136,441 @@ func (m *IndexStatsResponse) Unmarshal(dAtA []byte) error { } 
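The hunk above gives GetChunkRefRequest a new repeated Filters field of the LineFilterExpression message (an int64 operator plus a match string). A hedged sketch of how a caller might populate and round-trip it, assuming only the vendored logproto types shown in this diff; the operator value and matcher strings are invented examples:

    package main

    import (
        "fmt"

        "github.com/grafana/loki/pkg/logproto"
        "github.com/prometheus/common/model"
    )

    func main() {
        req := &logproto.GetChunkRefRequest{
            From:     model.Time(1700000000000), // model.Time is milliseconds since the epoch
            Through:  model.Time(1700003600000),
            Matchers: `{app="web"}`,
            Filters: []*logproto.LineFilterExpression{
                // Operator is a bare int64 on the wire; its mapping to the
                // LogQL line-filter operators (|=, !=, |~, !~) is an
                // assumption, not something this diff spells out.
                {Operator: 0, Match: "error"},
            },
        }
        raw, err := req.Marshal()
        if err != nil {
            panic(err)
        }
        var out logproto.GetChunkRefRequest
        if err := out.Unmarshal(raw); err != nil {
            panic(err)
        }
        fmt.Println(len(raw), out.Filters[0].Match)
    }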
return nil } +func (m *VolumeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + m.From = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.From |= github_com_prometheus_common_model.Time(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Through", wireType) + } + m.Through = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Through |= github_com_prometheus_common_model.Time(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + } + m.Step = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Step |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetLabels", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.TargetLabels = append(m.TargetLabels, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregateBy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AggregateBy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Volume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + } + m.Volume = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Volume |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipLogproto(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/grafana/loki/pkg/logproto/logproto.proto b/vendor/github.com/grafana/loki/pkg/logproto/logproto.proto index b3b79f0b..7cd6f32f 100644 --- a/vendor/github.com/grafana/loki/pkg/logproto/logproto.proto +++ b/vendor/github.com/grafana/loki/pkg/logproto/logproto.proto @@ -27,10 +27,9 @@ service Querier { // Note: this MUST be the same as the variant defined in // indexgateway.proto on the IndexGateway service. rpc GetStats(IndexStatsRequest) returns (IndexStatsResponse) {} -} - -service Ingester { - rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {} + // Note: this MUST be the same as the variant defined in + // indexgateway.proto on the IndexGateway service. + rpc GetVolume(VolumeRequest) returns (VolumeResponse) {} } service StreamData { @@ -48,10 +47,11 @@ message StreamRate { uint64 streamHashNoShard = 2; int64 rate = 3; // rate in plain bytes. string tenant = 4; + uint32 pushes = 5; } message QueryRequest { - string selector = 1; + string selector = 1 [deprecated = true]; uint32 limit = 2; google.protobuf.Timestamp start = 3 [ (gogoproto.stdtime) = true, @@ -65,10 +65,11 @@ message QueryRequest { reserved 6; repeated string shards = 7 [(gogoproto.jsontag) = "shards,omitempty"]; repeated Delete deletes = 8; + Plan plan = 9 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"]; } message SampleQueryRequest { - string selector = 1; + string selector = 1 [deprecated = true]; // mark as reserved once we've fully migrated to plan. 
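The GetVolume RPC added to the Querier service above is carried by the VolumeRequest/VolumeResponse messages, whose unmarshalers appear a few hunks earlier and whose proto definitions follow below. A round-trip sketch, assuming the vendored package path and the gogo-generated Marshal/Unmarshal methods; every field value here is invented:

    package main

    import (
        "fmt"

        "github.com/grafana/loki/pkg/logproto"
        "github.com/prometheus/common/model"
    )

    func main() {
        req := &logproto.VolumeRequest{
            From:        model.Time(1700000000000),
            Through:     model.Time(1700003600000),
            Matchers:    `{job="api"}`,
            Limit:       100,
            Step:        60_000, // int64; interpreting it as a step in milliseconds is an assumption
            AggregateBy: "labels",
        }
        raw, err := req.Marshal()
        if err != nil {
            panic(err)
        }
        var out logproto.VolumeRequest
        if err := out.Unmarshal(raw); err != nil {
            panic(err)
        }
        fmt.Println(out.Matchers, out.Limit, out.AggregateBy)
    }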
google.protobuf.Timestamp start = 2 [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false @@ -79,6 +80,11 @@ message SampleQueryRequest { ]; repeated string shards = 4 [(gogoproto.jsontag) = "shards,omitempty"]; repeated Delete deletes = 5; + Plan plan = 6 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"]; +} + +message Plan { + bytes raw = 1; } message Delete { @@ -148,7 +154,7 @@ message Series { } message TailRequest { - string query = 1; + string query = 1 [deprecated = true]; reserved 2; uint32 delayFor = 3; uint32 limit = 4; @@ -156,6 +162,7 @@ message TailRequest { (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + Plan plan = 6 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"]; } message TailResponse { @@ -196,19 +203,13 @@ message DroppedStream { string labels = 3; } -message TimeSeriesChunk { - string from_ingester_id = 1; - string user_id = 2; - repeated LabelPair labels = 3; - repeated Chunk chunks = 4; -} - message LabelPair { string name = 1; string value = 2; } // LegacyLabelPair exists for backwards compatibility reasons and is deprecated. Do not use. +// Use LabelPair instead. message LegacyLabelPair { bytes name = 1; bytes value = 2; @@ -218,8 +219,6 @@ message Chunk { bytes data = 1; } -message TransferChunksResponse {} - message TailersCountRequest {} message TailersCountResponse { @@ -294,6 +293,11 @@ message LabelNamesForMetricNameRequest { ]; } +message LineFilterExpression { + int64 operator = 1; + string match = 2; +} + message GetChunkRefRequest { int64 from = 1 [ (gogoproto.customtype) = "github.com/prometheus/common/model.Time", @@ -304,6 +308,7 @@ message GetChunkRefRequest { (gogoproto.nullable) = false ]; string matchers = 3; + repeated LineFilterExpression filters = 4; } message GetChunkRefResponse { @@ -376,3 +381,29 @@ message IndexStatsResponse { uint64 bytes = 3 [(gogoproto.jsontag) = "bytes"]; uint64 entries = 4 [(gogoproto.jsontag) = "entries"]; } + +message VolumeRequest { + int64 from = 1 [ + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; + int64 through = 2 [ + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; + string matchers = 3; + int32 limit = 4; + int64 step = 5; + repeated string targetLabels = 6; + string aggregateBy = 7; +} + +message VolumeResponse { + repeated Volume volumes = 1 [(gogoproto.nullable) = false]; + int32 limit = 2; +} + +message Volume { + string name = 1 [(gogoproto.jsontag) = "name"]; + uint64 volume = 3 [(gogoproto.jsontag) = "volume"]; +} diff --git a/vendor/github.com/grafana/loki/pkg/logproto/sketch.pb.go b/vendor/github.com/grafana/loki/pkg/logproto/sketch.pb.go new file mode 100644 index 00000000..c555d64d --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logproto/sketch.pb.go @@ -0,0 +1,3540 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: pkg/logproto/sketch.proto + +package logproto + +import ( + bytes "bytes" + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type QuantileSketchMatrix struct { + Values []*QuantileSketchVector `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (m *QuantileSketchMatrix) Reset() { *m = QuantileSketchMatrix{} } +func (*QuantileSketchMatrix) ProtoMessage() {} +func (*QuantileSketchMatrix) Descriptor() ([]byte, []int) { + return fileDescriptor_7f9fd40e59b87ff3, []int{0} +} +func (m *QuantileSketchMatrix) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuantileSketchMatrix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QuantileSketchMatrix.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QuantileSketchMatrix) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuantileSketchMatrix.Merge(m, src) +} +func (m *QuantileSketchMatrix) XXX_Size() int { + return m.Size() +} +func (m *QuantileSketchMatrix) XXX_DiscardUnknown() { + xxx_messageInfo_QuantileSketchMatrix.DiscardUnknown(m) +} + +var xxx_messageInfo_QuantileSketchMatrix proto.InternalMessageInfo + +func (m *QuantileSketchMatrix) GetValues() []*QuantileSketchVector { + if m != nil { + return m.Values + } + return nil +} + +type QuantileSketchVector struct { + Samples []*QuantileSketchSample `protobuf:"bytes,1,rep,name=samples,proto3" json:"samples,omitempty"` +} + +func (m *QuantileSketchVector) Reset() { *m = QuantileSketchVector{} } +func (*QuantileSketchVector) ProtoMessage() {} +func (*QuantileSketchVector) Descriptor() ([]byte, []int) { + return fileDescriptor_7f9fd40e59b87ff3, []int{1} +} +func (m *QuantileSketchVector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuantileSketchVector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QuantileSketchVector.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QuantileSketchVector) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuantileSketchVector.Merge(m, src) +} +func (m *QuantileSketchVector) XXX_Size() int { + return m.Size() +} +func (m *QuantileSketchVector) XXX_DiscardUnknown() { + xxx_messageInfo_QuantileSketchVector.DiscardUnknown(m) +} + +var xxx_messageInfo_QuantileSketchVector proto.InternalMessageInfo + +func (m *QuantileSketchVector) GetSamples() []*QuantileSketchSample { + if m != nil { + return m.Samples + } + return nil +} + +type QuantileSketchSample struct { + F *QuantileSketch `protobuf:"bytes,1,opt,name=f,proto3" json:"f,omitempty"` + TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` + Metric []*LabelPair `protobuf:"bytes,3,rep,name=metric,proto3" json:"metric,omitempty"` +} + +func (m *QuantileSketchSample) Reset() { *m = QuantileSketchSample{} } +func (*QuantileSketchSample) ProtoMessage() {} +func (*QuantileSketchSample) Descriptor() ([]byte, []int) { + return fileDescriptor_7f9fd40e59b87ff3, []int{2} +} +func (m *QuantileSketchSample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuantileSketchSample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_QuantileSketchSample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QuantileSketchSample) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuantileSketchSample.Merge(m, src) +} +func (m *QuantileSketchSample) XXX_Size() int { + return m.Size() +} +func (m *QuantileSketchSample) XXX_DiscardUnknown() { + xxx_messageInfo_QuantileSketchSample.DiscardUnknown(m) +} + +var xxx_messageInfo_QuantileSketchSample proto.InternalMessageInfo + +func (m *QuantileSketchSample) GetF() *QuantileSketch { + if m != nil { + return m.F + } + return nil +} + +func (m *QuantileSketchSample) GetTimestampMs() int64 { + if m != nil { + return m.TimestampMs + } + return 0 +} + +func (m *QuantileSketchSample) GetMetric() []*LabelPair { + if m != nil { + return m.Metric + } + return nil +} + +type QuantileSketch struct { + // Types that are valid to be assigned to Sketch: + // *QuantileSketch_Tdigest + // *QuantileSketch_Ddsketch + Sketch isQuantileSketch_Sketch `protobuf_oneof:"sketch"` +} + +func (m *QuantileSketch) Reset() { *m = QuantileSketch{} } +func (*QuantileSketch) ProtoMessage() {} +func (*QuantileSketch) Descriptor() ([]byte, []int) { + return fileDescriptor_7f9fd40e59b87ff3, []int{3} +} +func (m *QuantileSketch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuantileSketch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QuantileSketch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QuantileSketch) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuantileSketch.Merge(m, src) +} +func (m *QuantileSketch) XXX_Size() int { + return m.Size() +} +func (m *QuantileSketch) XXX_DiscardUnknown() { + xxx_messageInfo_QuantileSketch.DiscardUnknown(m) +} + +var xxx_messageInfo_QuantileSketch proto.InternalMessageInfo + +type isQuantileSketch_Sketch interface { + isQuantileSketch_Sketch() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type QuantileSketch_Tdigest struct { + Tdigest *TDigest `protobuf:"bytes,1,opt,name=tdigest,proto3,oneof"` +} +type QuantileSketch_Ddsketch struct { + Ddsketch []byte `protobuf:"bytes,2,opt,name=ddsketch,proto3,oneof"` +} + +func (*QuantileSketch_Tdigest) isQuantileSketch_Sketch() {} +func (*QuantileSketch_Ddsketch) isQuantileSketch_Sketch() {} + +func (m *QuantileSketch) GetSketch() isQuantileSketch_Sketch { + if m != nil { + return m.Sketch + } + return nil +} + +func (m *QuantileSketch) GetTdigest() *TDigest { + if x, ok := m.GetSketch().(*QuantileSketch_Tdigest); ok { + return x.Tdigest + } + return nil +} + +func (m *QuantileSketch) GetDdsketch() []byte { + if x, ok := m.GetSketch().(*QuantileSketch_Ddsketch); ok { + return x.Ddsketch + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*QuantileSketch) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*QuantileSketch_Tdigest)(nil), + (*QuantileSketch_Ddsketch)(nil), + } +} + +// "Large" bytes format from https://github.com/tdunning/t-digest +type TDigest struct { + Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` + Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` + Compression float64 `protobuf:"fixed64,3,opt,name=compression,proto3" json:"compression,omitempty"` + Processed []*TDigest_Centroid `protobuf:"bytes,4,rep,name=processed,proto3" json:"processed,omitempty"` +} + +func (m *TDigest) Reset() { *m = TDigest{} } +func (*TDigest) ProtoMessage() {} +func (*TDigest) Descriptor() ([]byte, []int) { + return fileDescriptor_7f9fd40e59b87ff3, []int{4} +} +func (m *TDigest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TDigest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TDigest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TDigest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TDigest.Merge(m, src) +} +func (m *TDigest) XXX_Size() int { + return m.Size() +} +func (m *TDigest) XXX_DiscardUnknown() { + xxx_messageInfo_TDigest.DiscardUnknown(m) +} + +var xxx_messageInfo_TDigest proto.InternalMessageInfo + +func (m *TDigest) GetMin() float64 { + if m != nil { + return m.Min + } + return 0 +} + +func (m *TDigest) GetMax() float64 { + if m != nil { + return m.Max + } + return 0 +} + +func (m *TDigest) GetCompression() float64 { + if m != nil { + return m.Compression + } + return 0 +} + +func (m *TDigest) GetProcessed() []*TDigest_Centroid { + if m != nil { + return m.Processed + } + return nil +} + +type TDigest_Centroid struct { + Mean float64 `protobuf:"fixed64,1,opt,name=mean,proto3" json:"mean,omitempty"` + Weight float64 `protobuf:"fixed64,2,opt,name=weight,proto3" json:"weight,omitempty"` +} + +func (m *TDigest_Centroid) Reset() { *m = TDigest_Centroid{} } +func (*TDigest_Centroid) ProtoMessage() {} +func (*TDigest_Centroid) Descriptor() ([]byte, []int) { + return fileDescriptor_7f9fd40e59b87ff3, []int{4, 0} +} +func (m *TDigest_Centroid) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TDigest_Centroid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TDigest_Centroid.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TDigest_Centroid) XXX_Merge(src proto.Message) { + xxx_messageInfo_TDigest_Centroid.Merge(m, src) +} +func (m *TDigest_Centroid) XXX_Size() int { + return m.Size() +} +func (m *TDigest_Centroid) XXX_DiscardUnknown() { + xxx_messageInfo_TDigest_Centroid.DiscardUnknown(m) +} + +var xxx_messageInfo_TDigest_Centroid proto.InternalMessageInfo + +func (m *TDigest_Centroid) GetMean() float64 { + if m != nil { + return m.Mean + } + return 0 +} + +func (m *TDigest_Centroid) GetWeight() float64 { + if m != nil { + return m.Weight + } + return 0 +} + +type CountMinSketch struct { + Depth uint32 `protobuf:"varint,1,opt,name=depth,proto3" json:"depth,omitempty"` + Width uint32 `protobuf:"varint,2,opt,name=width,proto3" json:"width,omitempty"` + // counters is a matrix of depth * width. 
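The CountMinSketch message begun just above keeps its depth * width counter matrix in the flat Counters slice declared on the next line. A self-contained sketch of the classic count-min update and point query over such a flat slice; the row-major indexing and the salted-FNV per-row hashing are illustrative assumptions, not taken from this diff:

    package main

    import (
        "fmt"
        "hash/fnv"
    )

    // hashRow derives one hash function per row by salting FNV-1a
    // with the row index.
    func hashRow(event string, row uint32) uint32 {
        h := fnv.New32a()
        fmt.Fprintf(h, "%d:%s", row, event)
        return h.Sum32()
    }

    func main() {
        depth, width := uint32(4), uint32(64)
        counters := make([]uint32, depth*width) // assumed row-major: counters[row*width+col]

        add := func(event string) {
            for r := uint32(0); r < depth; r++ {
                counters[r*width+hashRow(event, r)%width]++
            }
        }
        count := func(event string) uint32 {
            min := ^uint32(0) // a point query takes the minimum across rows
            for r := uint32(0); r < depth; r++ {
                if v := counters[r*width+hashRow(event, r)%width]; v < min {
                    min = v
                }
            }
            return min
        }

        add("GET /index")
        add("GET /index")
        add("POST /login")
        fmt.Println(count("GET /index"), count("POST /login")) // 2 1 (count-min can only over-count)
    }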
+ Counters []uint32 `protobuf:"varint,3,rep,packed,name=counters,proto3" json:"counters,omitempty"` +} + +func (m *CountMinSketch) Reset() { *m = CountMinSketch{} } +func (*CountMinSketch) ProtoMessage() {} +func (*CountMinSketch) Descriptor() ([]byte, []int) { + return fileDescriptor_7f9fd40e59b87ff3, []int{5} +} +func (m *CountMinSketch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CountMinSketch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CountMinSketch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CountMinSketch) XXX_Merge(src proto.Message) { + xxx_messageInfo_CountMinSketch.Merge(m, src) +} +func (m *CountMinSketch) XXX_Size() int { + return m.Size() +} +func (m *CountMinSketch) XXX_DiscardUnknown() { + xxx_messageInfo_CountMinSketch.DiscardUnknown(m) +} + +var xxx_messageInfo_CountMinSketch proto.InternalMessageInfo + +func (m *CountMinSketch) GetDepth() uint32 { + if m != nil { + return m.Depth + } + return 0 +} + +func (m *CountMinSketch) GetWidth() uint32 { + if m != nil { + return m.Width + } + return 0 +} + +func (m *CountMinSketch) GetCounters() []uint32 { + if m != nil { + return m.Counters + } + return nil +} + +type TopK struct { + Cms *CountMinSketch `protobuf:"bytes,1,opt,name=cms,proto3" json:"cms,omitempty"` + List []*TopK_Pair `protobuf:"bytes,2,rep,name=list,proto3" json:"list,omitempty"` + Hyperloglog []byte `protobuf:"bytes,3,opt,name=hyperloglog,proto3" json:"hyperloglog,omitempty"` +} + +func (m *TopK) Reset() { *m = TopK{} } +func (*TopK) ProtoMessage() {} +func (*TopK) Descriptor() ([]byte, []int) { + return fileDescriptor_7f9fd40e59b87ff3, []int{6} +} +func (m *TopK) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopK) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TopK.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TopK) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopK.Merge(m, src) +} +func (m *TopK) XXX_Size() int { + return m.Size() +} +func (m *TopK) XXX_DiscardUnknown() { + xxx_messageInfo_TopK.DiscardUnknown(m) +} + +var xxx_messageInfo_TopK proto.InternalMessageInfo + +func (m *TopK) GetCms() *CountMinSketch { + if m != nil { + return m.Cms + } + return nil +} + +func (m *TopK) GetList() []*TopK_Pair { + if m != nil { + return m.List + } + return nil +} + +func (m *TopK) GetHyperloglog() []byte { + if m != nil { + return m.Hyperloglog + } + return nil +} + +type TopK_Pair struct { + Event string `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` + Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` +} + +func (m *TopK_Pair) Reset() { *m = TopK_Pair{} } +func (*TopK_Pair) ProtoMessage() {} +func (*TopK_Pair) Descriptor() ([]byte, []int) { + return fileDescriptor_7f9fd40e59b87ff3, []int{6, 0} +} +func (m *TopK_Pair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopK_Pair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TopK_Pair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TopK_Pair) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_TopK_Pair.Merge(m, src) +} +func (m *TopK_Pair) XXX_Size() int { + return m.Size() +} +func (m *TopK_Pair) XXX_DiscardUnknown() { + xxx_messageInfo_TopK_Pair.DiscardUnknown(m) +} + +var xxx_messageInfo_TopK_Pair proto.InternalMessageInfo + +func (m *TopK_Pair) GetEvent() string { + if m != nil { + return m.Event + } + return "" +} + +func (m *TopK_Pair) GetCount() uint32 { + if m != nil { + return m.Count + } + return 0 +} + +type TopKMatrix struct { + Values []*TopKMatrix_Vector `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (m *TopKMatrix) Reset() { *m = TopKMatrix{} } +func (*TopKMatrix) ProtoMessage() {} +func (*TopKMatrix) Descriptor() ([]byte, []int) { + return fileDescriptor_7f9fd40e59b87ff3, []int{7} +} +func (m *TopKMatrix) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopKMatrix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TopKMatrix.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TopKMatrix) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopKMatrix.Merge(m, src) +} +func (m *TopKMatrix) XXX_Size() int { + return m.Size() +} +func (m *TopKMatrix) XXX_DiscardUnknown() { + xxx_messageInfo_TopKMatrix.DiscardUnknown(m) +} + +var xxx_messageInfo_TopKMatrix proto.InternalMessageInfo + +func (m *TopKMatrix) GetValues() []*TopKMatrix_Vector { + if m != nil { + return m.Values + } + return nil +} + +type TopKMatrix_Vector struct { + Topk *TopK `protobuf:"bytes,1,opt,name=topk,proto3" json:"topk,omitempty"` + TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` +} + +func (m *TopKMatrix_Vector) Reset() { *m = TopKMatrix_Vector{} } +func (*TopKMatrix_Vector) ProtoMessage() {} +func (*TopKMatrix_Vector) Descriptor() ([]byte, []int) { + return fileDescriptor_7f9fd40e59b87ff3, []int{7, 0} +} +func (m *TopKMatrix_Vector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopKMatrix_Vector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TopKMatrix_Vector.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TopKMatrix_Vector) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopKMatrix_Vector.Merge(m, src) +} +func (m *TopKMatrix_Vector) XXX_Size() int { + return m.Size() +} +func (m *TopKMatrix_Vector) XXX_DiscardUnknown() { + xxx_messageInfo_TopKMatrix_Vector.DiscardUnknown(m) +} + +var xxx_messageInfo_TopKMatrix_Vector proto.InternalMessageInfo + +func (m *TopKMatrix_Vector) GetTopk() *TopK { + if m != nil { + return m.Topk + } + return nil +} + +func (m *TopKMatrix_Vector) GetTimestampMs() int64 { + if m != nil { + return m.TimestampMs + } + return 0 +} + +func init() { + proto.RegisterType((*QuantileSketchMatrix)(nil), "logproto.QuantileSketchMatrix") + proto.RegisterType((*QuantileSketchVector)(nil), "logproto.QuantileSketchVector") + proto.RegisterType((*QuantileSketchSample)(nil), "logproto.QuantileSketchSample") + proto.RegisterType((*QuantileSketch)(nil), "logproto.QuantileSketch") + proto.RegisterType((*TDigest)(nil), "logproto.TDigest") + proto.RegisterType((*TDigest_Centroid)(nil), "logproto.TDigest.Centroid") + 
proto.RegisterType((*CountMinSketch)(nil), "logproto.CountMinSketch") + proto.RegisterType((*TopK)(nil), "logproto.TopK") + proto.RegisterType((*TopK_Pair)(nil), "logproto.TopK.Pair") + proto.RegisterType((*TopKMatrix)(nil), "logproto.TopKMatrix") + proto.RegisterType((*TopKMatrix_Vector)(nil), "logproto.TopKMatrix.Vector") +} + +func init() { proto.RegisterFile("pkg/logproto/sketch.proto", fileDescriptor_7f9fd40e59b87ff3) } + +var fileDescriptor_7f9fd40e59b87ff3 = []byte{ + // 623 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x41, 0x4f, 0xd4, 0x4e, + 0x14, 0xef, 0xfc, 0x77, 0xff, 0xcb, 0xf2, 0x16, 0x88, 0x8e, 0xc4, 0xd4, 0xc5, 0x4c, 0xd6, 0xc6, + 0x28, 0xd1, 0xb8, 0x9b, 0x40, 0x42, 0x38, 0x83, 0x07, 0x12, 0x45, 0x71, 0x20, 0xc6, 0x70, 0x31, + 0x43, 0x3b, 0x74, 0x27, 0xdb, 0x76, 0x9a, 0xce, 0x2c, 0xe0, 0xcd, 0x4f, 0x60, 0x8c, 0x9f, 0xc2, + 0xab, 0x1f, 0xc1, 0x9b, 0x47, 0x8e, 0x1c, 0xa5, 0x5c, 0x3c, 0xf2, 0x11, 0xcc, 0x4c, 0xdb, 0x85, + 0x2e, 0x31, 0x7a, 0xda, 0x79, 0xbf, 0xf7, 0x7b, 0xbf, 0xf9, 0xcd, 0x7b, 0x7d, 0x0b, 0xf7, 0xd2, + 0x51, 0x38, 0x88, 0x64, 0x98, 0x66, 0x52, 0xcb, 0x81, 0x1a, 0x71, 0xed, 0x0f, 0xfb, 0x36, 0xc0, + 0xed, 0x0a, 0xee, 0x2e, 0xd5, 0x48, 0xd5, 0xa1, 0xa0, 0x79, 0xaf, 0x60, 0xf1, 0xcd, 0x98, 0x25, + 0x5a, 0x44, 0x7c, 0xd7, 0x96, 0x6f, 0x33, 0x9d, 0x89, 0x13, 0xbc, 0x06, 0xad, 0x23, 0x16, 0x8d, + 0xb9, 0x72, 0x51, 0xaf, 0xb1, 0xdc, 0x59, 0x21, 0xfd, 0x49, 0x61, 0x9d, 0xff, 0x96, 0xfb, 0x5a, + 0x66, 0xb4, 0x64, 0x7b, 0x3b, 0xd3, 0x7a, 0x45, 0x1e, 0xaf, 0xc3, 0x8c, 0x62, 0x71, 0x1a, 0xfd, + 0x5d, 0x70, 0xd7, 0xd2, 0x68, 0x45, 0xf7, 0x3e, 0xa1, 0x69, 0xc9, 0x82, 0x81, 0x1f, 0x01, 0x3a, + 0x74, 0x51, 0x0f, 0x2d, 0x77, 0x56, 0xdc, 0x3f, 0x89, 0x51, 0x74, 0x88, 0x1f, 0xc0, 0x9c, 0x16, + 0x31, 0x57, 0x9a, 0xc5, 0xe9, 0xfb, 0x58, 0xb9, 0xff, 0xf5, 0xd0, 0x72, 0x83, 0x76, 0x26, 0xd8, + 0xb6, 0xc2, 0x4f, 0xa1, 0x15, 0x73, 0x9d, 0x09, 0xdf, 0x6d, 0x58, 0x73, 0x77, 0xae, 0xf4, 0x5e, + 0xb2, 0x03, 0x1e, 0xed, 0x30, 0x91, 0xd1, 0x92, 0xe2, 0x85, 0xb0, 0x50, 0xbf, 0x04, 0x3f, 0x83, + 0x19, 0x1d, 0x88, 0x90, 0x2b, 0x5d, 0xfa, 0xb9, 0x7d, 0x55, 0xbf, 0xf7, 0xdc, 0x26, 0xb6, 0x1c, + 0x5a, 0x71, 0xf0, 0x7d, 0x68, 0x07, 0x41, 0x31, 0x2c, 0x6b, 0x66, 0x6e, 0xcb, 0xa1, 0x13, 0x64, + 0xa3, 0x0d, 0xad, 0xe2, 0xe4, 0x7d, 0x47, 0x30, 0x53, 0x96, 0xe3, 0x5b, 0xd0, 0x88, 0x45, 0x62, + 0xe5, 0x11, 0x35, 0x47, 0x8b, 0xb0, 0x13, 0x2b, 0x60, 0x10, 0x76, 0x82, 0x7b, 0xd0, 0xf1, 0x65, + 0x9c, 0x66, 0x5c, 0x29, 0x21, 0x13, 0xb7, 0x61, 0x33, 0xd7, 0x21, 0xbc, 0x0e, 0xb3, 0x69, 0x26, + 0x7d, 0xae, 0x14, 0x0f, 0xdc, 0xa6, 0x7d, 0x6a, 0xf7, 0x86, 0xd5, 0xfe, 0x26, 0x4f, 0x74, 0x26, + 0x45, 0x40, 0xaf, 0xc8, 0xdd, 0x35, 0x68, 0x57, 0x30, 0xc6, 0xd0, 0x8c, 0x39, 0xab, 0xcc, 0xd8, + 0x33, 0xbe, 0x0b, 0xad, 0x63, 0x2e, 0xc2, 0xa1, 0x2e, 0x0d, 0x95, 0x91, 0xf7, 0x0e, 0x16, 0x36, + 0xe5, 0x38, 0xd1, 0xdb, 0x22, 0x29, 0x9b, 0xb5, 0x08, 0xff, 0x07, 0x3c, 0xd5, 0x43, 0x5b, 0x3e, + 0x4f, 0x8b, 0xc0, 0xa0, 0xc7, 0x22, 0xd0, 0x45, 0x43, 0xe6, 0x69, 0x11, 0xe0, 0x2e, 0xb4, 0x7d, + 0x53, 0xcd, 0x33, 0x65, 0x27, 0x33, 0x4f, 0x27, 0xb1, 0xf7, 0x0d, 0x41, 0x73, 0x4f, 0xa6, 0x2f, + 0xf0, 0x13, 0x68, 0xf8, 0xb1, 0xba, 0xf9, 0x25, 0xd4, 0xef, 0xa5, 0x86, 0x84, 0x1f, 0x43, 0x33, + 0x12, 0xca, 0x98, 0x9c, 0x1a, 0xb3, 0x51, 0xea, 0xdb, 0x31, 0x5b, 0x82, 0xe9, 0xe5, 0xf0, 0x43, + 0xca, 0xb3, 0x48, 0x86, 0x91, 0x0c, 0x6d, 0x2f, 0xe7, 0xe8, 0x75, 0xa8, 0xbb, 0x02, 0x4d, 0xc3, + 0x37, 0xce, 0xf9, 0x11, 0x4f, 0x8a, 0xd1, 0xcf, 0xd2, 0x22, 0x30, 0xa8, 0x75, 0x5a, 
0xbd, 0xc7, + 0x06, 0xde, 0x17, 0x04, 0x60, 0x6e, 0x2a, 0x97, 0x6c, 0x75, 0x6a, 0xc9, 0x96, 0xea, 0x7e, 0x0a, + 0x56, 0xbf, 0xbe, 0x61, 0xdd, 0xd7, 0xd0, 0x2a, 0x77, 0xca, 0x83, 0xa6, 0x96, 0xe9, 0xa8, 0x7c, + 0xf9, 0x42, 0xbd, 0x98, 0xda, 0xdc, 0x3f, 0x7c, 0xfc, 0x1b, 0xfb, 0xa7, 0xe7, 0xc4, 0x39, 0x3b, + 0x27, 0xce, 0xe5, 0x39, 0x41, 0x1f, 0x73, 0x82, 0xbe, 0xe6, 0x04, 0xfd, 0xc8, 0x09, 0x3a, 0xcd, + 0x09, 0xfa, 0x99, 0x13, 0xf4, 0x2b, 0x27, 0xce, 0x65, 0x4e, 0xd0, 0xe7, 0x0b, 0xe2, 0x9c, 0x5e, + 0x10, 0xe7, 0xec, 0x82, 0x38, 0xfb, 0x0f, 0x43, 0xa1, 0x87, 0xe3, 0x83, 0xbe, 0x2f, 0xe3, 0x41, + 0x98, 0xb1, 0x43, 0x96, 0xb0, 0x41, 0x24, 0x47, 0x62, 0x70, 0xfd, 0xdf, 0xe6, 0xa0, 0x65, 0x7f, + 0x56, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x24, 0x9c, 0x74, 0xb7, 0xa9, 0x04, 0x00, 0x00, +} + +func (this *QuantileSketchMatrix) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QuantileSketchMatrix) + if !ok { + that2, ok := that.(QuantileSketchMatrix) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Values) != len(that1.Values) { + return false + } + for i := range this.Values { + if !this.Values[i].Equal(that1.Values[i]) { + return false + } + } + return true +} +func (this *QuantileSketchVector) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QuantileSketchVector) + if !ok { + that2, ok := that.(QuantileSketchVector) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Samples) != len(that1.Samples) { + return false + } + for i := range this.Samples { + if !this.Samples[i].Equal(that1.Samples[i]) { + return false + } + } + return true +} +func (this *QuantileSketchSample) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QuantileSketchSample) + if !ok { + that2, ok := that.(QuantileSketchSample) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.F.Equal(that1.F) { + return false + } + if this.TimestampMs != that1.TimestampMs { + return false + } + if len(this.Metric) != len(that1.Metric) { + return false + } + for i := range this.Metric { + if !this.Metric[i].Equal(that1.Metric[i]) { + return false + } + } + return true +} +func (this *QuantileSketch) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QuantileSketch) + if !ok { + that2, ok := that.(QuantileSketch) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Sketch == nil { + if this.Sketch != nil { + return false + } + } else if this.Sketch == nil { + return false + } else if !this.Sketch.Equal(that1.Sketch) { + return false + } + return true +} +func (this *QuantileSketch_Tdigest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QuantileSketch_Tdigest) + if !ok { + that2, ok := that.(QuantileSketch_Tdigest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Tdigest.Equal(that1.Tdigest) { + return false + } + return true +} +func (this *QuantileSketch_Ddsketch) 
Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QuantileSketch_Ddsketch) + if !ok { + that2, ok := that.(QuantileSketch_Ddsketch) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Ddsketch, that1.Ddsketch) { + return false + } + return true +} +func (this *TDigest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TDigest) + if !ok { + that2, ok := that.(TDigest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Min != that1.Min { + return false + } + if this.Max != that1.Max { + return false + } + if this.Compression != that1.Compression { + return false + } + if len(this.Processed) != len(that1.Processed) { + return false + } + for i := range this.Processed { + if !this.Processed[i].Equal(that1.Processed[i]) { + return false + } + } + return true +} +func (this *TDigest_Centroid) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TDigest_Centroid) + if !ok { + that2, ok := that.(TDigest_Centroid) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Mean != that1.Mean { + return false + } + if this.Weight != that1.Weight { + return false + } + return true +} +func (this *CountMinSketch) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CountMinSketch) + if !ok { + that2, ok := that.(CountMinSketch) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Depth != that1.Depth { + return false + } + if this.Width != that1.Width { + return false + } + if len(this.Counters) != len(that1.Counters) { + return false + } + for i := range this.Counters { + if this.Counters[i] != that1.Counters[i] { + return false + } + } + return true +} +func (this *TopK) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TopK) + if !ok { + that2, ok := that.(TopK) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Cms.Equal(that1.Cms) { + return false + } + if len(this.List) != len(that1.List) { + return false + } + for i := range this.List { + if !this.List[i].Equal(that1.List[i]) { + return false + } + } + if !bytes.Equal(this.Hyperloglog, that1.Hyperloglog) { + return false + } + return true +} +func (this *TopK_Pair) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TopK_Pair) + if !ok { + that2, ok := that.(TopK_Pair) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Event != that1.Event { + return false + } + if this.Count != that1.Count { + return false + } + return true +} +func (this *TopKMatrix) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TopKMatrix) + if !ok { + that2, ok := that.(TopKMatrix) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return 
false + } + if len(this.Values) != len(that1.Values) { + return false + } + for i := range this.Values { + if !this.Values[i].Equal(that1.Values[i]) { + return false + } + } + return true +} +func (this *TopKMatrix_Vector) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TopKMatrix_Vector) + if !ok { + that2, ok := that.(TopKMatrix_Vector) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Topk.Equal(that1.Topk) { + return false + } + if this.TimestampMs != that1.TimestampMs { + return false + } + return true +} +func (this *QuantileSketchMatrix) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.QuantileSketchMatrix{") + if this.Values != nil { + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QuantileSketchVector) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.QuantileSketchVector{") + if this.Samples != nil { + s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QuantileSketchSample) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&logproto.QuantileSketchSample{") + if this.F != nil { + s = append(s, "F: "+fmt.Sprintf("%#v", this.F)+",\n") + } + s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n") + if this.Metric != nil { + s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QuantileSketch) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.QuantileSketch{") + if this.Sketch != nil { + s = append(s, "Sketch: "+fmt.Sprintf("%#v", this.Sketch)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QuantileSketch_Tdigest) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&logproto.QuantileSketch_Tdigest{` + + `Tdigest:` + fmt.Sprintf("%#v", this.Tdigest) + `}`}, ", ") + return s +} +func (this *QuantileSketch_Ddsketch) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&logproto.QuantileSketch_Ddsketch{` + + `Ddsketch:` + fmt.Sprintf("%#v", this.Ddsketch) + `}`}, ", ") + return s +} +func (this *TDigest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&logproto.TDigest{") + s = append(s, "Min: "+fmt.Sprintf("%#v", this.Min)+",\n") + s = append(s, "Max: "+fmt.Sprintf("%#v", this.Max)+",\n") + s = append(s, "Compression: "+fmt.Sprintf("%#v", this.Compression)+",\n") + if this.Processed != nil { + s = append(s, "Processed: "+fmt.Sprintf("%#v", this.Processed)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TDigest_Centroid) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.TDigest_Centroid{") + s = append(s, "Mean: "+fmt.Sprintf("%#v", this.Mean)+",\n") + s = append(s, "Weight: "+fmt.Sprintf("%#v", this.Weight)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CountMinSketch) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = 
append(s, "&logproto.CountMinSketch{") + s = append(s, "Depth: "+fmt.Sprintf("%#v", this.Depth)+",\n") + s = append(s, "Width: "+fmt.Sprintf("%#v", this.Width)+",\n") + s = append(s, "Counters: "+fmt.Sprintf("%#v", this.Counters)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TopK) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&logproto.TopK{") + if this.Cms != nil { + s = append(s, "Cms: "+fmt.Sprintf("%#v", this.Cms)+",\n") + } + if this.List != nil { + s = append(s, "List: "+fmt.Sprintf("%#v", this.List)+",\n") + } + s = append(s, "Hyperloglog: "+fmt.Sprintf("%#v", this.Hyperloglog)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TopK_Pair) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.TopK_Pair{") + s = append(s, "Event: "+fmt.Sprintf("%#v", this.Event)+",\n") + s = append(s, "Count: "+fmt.Sprintf("%#v", this.Count)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TopKMatrix) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.TopKMatrix{") + if this.Values != nil { + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TopKMatrix_Vector) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.TopKMatrix_Vector{") + if this.Topk != nil { + s = append(s, "Topk: "+fmt.Sprintf("%#v", this.Topk)+",\n") + } + s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSketch(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *QuantileSketchMatrix) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuantileSketchMatrix) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuantileSketchMatrix) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSketch(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QuantileSketchVector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuantileSketchVector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuantileSketchVector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSketch(dAtA, i, uint64(size)) 
+ } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QuantileSketchSample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuantileSketchSample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuantileSketchSample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Metric) > 0 { + for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSketch(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.TimestampMs != 0 { + i = encodeVarintSketch(dAtA, i, uint64(m.TimestampMs)) + i-- + dAtA[i] = 0x10 + } + if m.F != nil { + { + size, err := m.F.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSketch(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QuantileSketch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuantileSketch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuantileSketch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sketch != nil { + { + size := m.Sketch.Size() + i -= size + if _, err := m.Sketch.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *QuantileSketch_Tdigest) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *QuantileSketch_Tdigest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Tdigest != nil { + { + size, err := m.Tdigest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSketch(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *QuantileSketch_Ddsketch) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *QuantileSketch_Ddsketch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Ddsketch != nil { + i -= len(m.Ddsketch) + copy(dAtA[i:], m.Ddsketch) + i = encodeVarintSketch(dAtA, i, uint64(len(m.Ddsketch))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *TDigest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TDigest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TDigest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Processed) > 0 { + for iNdEx := len(m.Processed) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Processed[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSketch(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Compression != 0 { + i -= 8 + 
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Compression)))) + i-- + dAtA[i] = 0x19 + } + if m.Max != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Max)))) + i-- + dAtA[i] = 0x11 + } + if m.Min != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Min)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *TDigest_Centroid) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TDigest_Centroid) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TDigest_Centroid) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Weight != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Weight)))) + i-- + dAtA[i] = 0x11 + } + if m.Mean != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Mean)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *CountMinSketch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CountMinSketch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CountMinSketch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Counters) > 0 { + dAtA4 := make([]byte, len(m.Counters)*10) + var j3 int + for _, num := range m.Counters { + for num >= 1<<7 { + dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j3++ + } + dAtA4[j3] = uint8(num) + j3++ + } + i -= j3 + copy(dAtA[i:], dAtA4[:j3]) + i = encodeVarintSketch(dAtA, i, uint64(j3)) + i-- + dAtA[i] = 0x1a + } + if m.Width != 0 { + i = encodeVarintSketch(dAtA, i, uint64(m.Width)) + i-- + dAtA[i] = 0x10 + } + if m.Depth != 0 { + i = encodeVarintSketch(dAtA, i, uint64(m.Depth)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TopK) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopK) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopK) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hyperloglog) > 0 { + i -= len(m.Hyperloglog) + copy(dAtA[i:], m.Hyperloglog) + i = encodeVarintSketch(dAtA, i, uint64(len(m.Hyperloglog))) + i-- + dAtA[i] = 0x1a + } + if len(m.List) > 0 { + for iNdEx := len(m.List) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.List[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSketch(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Cms != nil { + { + size, err := m.Cms.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSketch(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TopK_Pair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA 
= make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopK_Pair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopK_Pair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != 0 { + i = encodeVarintSketch(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x10 + } + if len(m.Event) > 0 { + i -= len(m.Event) + copy(dAtA[i:], m.Event) + i = encodeVarintSketch(dAtA, i, uint64(len(m.Event))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TopKMatrix) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopKMatrix) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopKMatrix) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSketch(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TopKMatrix_Vector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopKMatrix_Vector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopKMatrix_Vector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimestampMs != 0 { + i = encodeVarintSketch(dAtA, i, uint64(m.TimestampMs)) + i-- + dAtA[i] = 0x10 + } + if m.Topk != nil { + { + size, err := m.Topk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSketch(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSketch(dAtA []byte, offset int, v uint64) int { + offset -= sovSketch(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QuantileSketchMatrix) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovSketch(uint64(l)) + } + } + return n +} + +func (m *QuantileSketchVector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovSketch(uint64(l)) + } + } + return n +} + +func (m *QuantileSketchSample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.F != nil { + l = m.F.Size() + n += 1 + l + sovSketch(uint64(l)) + } + if m.TimestampMs != 0 { + n += 1 + sovSketch(uint64(m.TimestampMs)) + } + if len(m.Metric) > 0 { + for _, e := range m.Metric { + l = e.Size() + n += 1 + l + sovSketch(uint64(l)) + } + } + return n +} + +func (m *QuantileSketch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sketch != nil { + n += m.Sketch.Size() + } + return n +} + +func (m *QuantileSketch_Tdigest) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tdigest != nil { + l = m.Tdigest.Size() + n += 1 + l + sovSketch(uint64(l)) + } + return n +} +func (m *QuantileSketch_Ddsketch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ddsketch != nil { + l = len(m.Ddsketch) + n += 1 + l + sovSketch(uint64(l)) + } + return n +} +func (m *TDigest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Min != 0 { + n += 9 + } + if m.Max != 0 { + n += 9 + } + if m.Compression != 0 { + n += 9 + } + if len(m.Processed) > 0 { + for _, e := range m.Processed { + l = e.Size() + n += 1 + l + sovSketch(uint64(l)) + } + } + return n +} + +func (m *TDigest_Centroid) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Mean != 0 { + n += 9 + } + if m.Weight != 0 { + n += 9 + } + return n +} + +func (m *CountMinSketch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Depth != 0 { + n += 1 + sovSketch(uint64(m.Depth)) + } + if m.Width != 0 { + n += 1 + sovSketch(uint64(m.Width)) + } + if len(m.Counters) > 0 { + l = 0 + for _, e := range m.Counters { + l += sovSketch(uint64(e)) + } + n += 1 + sovSketch(uint64(l)) + l + } + return n +} + +func (m *TopK) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cms != nil { + l = m.Cms.Size() + n += 1 + l + sovSketch(uint64(l)) + } + if len(m.List) > 0 { + for _, e := range m.List { + l = e.Size() + n += 1 + l + sovSketch(uint64(l)) + } + } + l = len(m.Hyperloglog) + if l > 0 { + n += 1 + l + sovSketch(uint64(l)) + } + return n +} + +func (m *TopK_Pair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Event) + if l > 0 { + n += 1 + l + sovSketch(uint64(l)) + } + if m.Count != 0 { + n += 1 + sovSketch(uint64(m.Count)) + } + return n +} + +func (m *TopKMatrix) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovSketch(uint64(l)) + } + } + return n +} + +func (m *TopKMatrix_Vector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Topk != nil { + l = m.Topk.Size() + n += 1 + l + sovSketch(uint64(l)) + } + if m.TimestampMs != 0 { + n += 1 + sovSketch(uint64(m.TimestampMs)) + } + return n +} + +func sovSketch(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSketch(x uint64) (n int) { + return sovSketch(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *QuantileSketchMatrix) String() string { + if this == nil { + return "nil" + } + repeatedStringForValues := "[]*QuantileSketchVector{" + for _, f := range this.Values { + repeatedStringForValues += strings.Replace(f.String(), "QuantileSketchVector", "QuantileSketchVector", 1) + "," + } + repeatedStringForValues += "}" + s := strings.Join([]string{`&QuantileSketchMatrix{`, + `Values:` + repeatedStringForValues + `,`, + `}`, + }, "") + return s +} +func (this *QuantileSketchVector) String() string { + if this == nil { + return "nil" + } + repeatedStringForSamples := "[]*QuantileSketchSample{" + for _, f := range this.Samples { + repeatedStringForSamples += strings.Replace(f.String(), "QuantileSketchSample", "QuantileSketchSample", 1) + "," + } + repeatedStringForSamples += "}" + s := strings.Join([]string{`&QuantileSketchVector{`, + `Samples:` + repeatedStringForSamples + `,`, + `}`, + }, "") + return s +} +func (this *QuantileSketchSample) String() string { + if this == nil { + return "nil" + } + 
repeatedStringForMetric := "[]*LabelPair{" + for _, f := range this.Metric { + repeatedStringForMetric += strings.Replace(fmt.Sprintf("%v", f), "LabelPair", "LabelPair", 1) + "," + } + repeatedStringForMetric += "}" + s := strings.Join([]string{`&QuantileSketchSample{`, + `F:` + strings.Replace(this.F.String(), "QuantileSketch", "QuantileSketch", 1) + `,`, + `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`, + `Metric:` + repeatedStringForMetric + `,`, + `}`, + }, "") + return s +} +func (this *QuantileSketch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuantileSketch{`, + `Sketch:` + fmt.Sprintf("%v", this.Sketch) + `,`, + `}`, + }, "") + return s +} +func (this *QuantileSketch_Tdigest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuantileSketch_Tdigest{`, + `Tdigest:` + strings.Replace(fmt.Sprintf("%v", this.Tdigest), "TDigest", "TDigest", 1) + `,`, + `}`, + }, "") + return s +} +func (this *QuantileSketch_Ddsketch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuantileSketch_Ddsketch{`, + `Ddsketch:` + fmt.Sprintf("%v", this.Ddsketch) + `,`, + `}`, + }, "") + return s +} +func (this *TDigest) String() string { + if this == nil { + return "nil" + } + repeatedStringForProcessed := "[]*TDigest_Centroid{" + for _, f := range this.Processed { + repeatedStringForProcessed += strings.Replace(fmt.Sprintf("%v", f), "TDigest_Centroid", "TDigest_Centroid", 1) + "," + } + repeatedStringForProcessed += "}" + s := strings.Join([]string{`&TDigest{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `Compression:` + fmt.Sprintf("%v", this.Compression) + `,`, + `Processed:` + repeatedStringForProcessed + `,`, + `}`, + }, "") + return s +} +func (this *TDigest_Centroid) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TDigest_Centroid{`, + `Mean:` + fmt.Sprintf("%v", this.Mean) + `,`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `}`, + }, "") + return s +} +func (this *CountMinSketch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CountMinSketch{`, + `Depth:` + fmt.Sprintf("%v", this.Depth) + `,`, + `Width:` + fmt.Sprintf("%v", this.Width) + `,`, + `Counters:` + fmt.Sprintf("%v", this.Counters) + `,`, + `}`, + }, "") + return s +} +func (this *TopK) String() string { + if this == nil { + return "nil" + } + repeatedStringForList := "[]*TopK_Pair{" + for _, f := range this.List { + repeatedStringForList += strings.Replace(fmt.Sprintf("%v", f), "TopK_Pair", "TopK_Pair", 1) + "," + } + repeatedStringForList += "}" + s := strings.Join([]string{`&TopK{`, + `Cms:` + strings.Replace(this.Cms.String(), "CountMinSketch", "CountMinSketch", 1) + `,`, + `List:` + repeatedStringForList + `,`, + `Hyperloglog:` + fmt.Sprintf("%v", this.Hyperloglog) + `,`, + `}`, + }, "") + return s +} +func (this *TopK_Pair) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopK_Pair{`, + `Event:` + fmt.Sprintf("%v", this.Event) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `}`, + }, "") + return s +} +func (this *TopKMatrix) String() string { + if this == nil { + return "nil" + } + repeatedStringForValues := "[]*TopKMatrix_Vector{" + for _, f := range this.Values { + repeatedStringForValues += strings.Replace(fmt.Sprintf("%v", f), "TopKMatrix_Vector", "TopKMatrix_Vector", 1) + "," + } + repeatedStringForValues += "}" + s := 
strings.Join([]string{`&TopKMatrix{`, + `Values:` + repeatedStringForValues + `,`, + `}`, + }, "") + return s +} +func (this *TopKMatrix_Vector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopKMatrix_Vector{`, + `Topk:` + strings.Replace(this.Topk.String(), "TopK", "TopK", 1) + `,`, + `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`, + `}`, + }, "") + return s +} +func valueToStringSketch(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *QuantileSketchMatrix) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuantileSketchMatrix: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuantileSketchMatrix: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSketch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &QuantileSketchVector{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSketch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuantileSketchVector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuantileSketchVector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuantileSketchVector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSketch + } + 
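// postIndex bounds the embedded QuantileSketchSample message within dAtA; the sample is decoded from dAtA[iNdEx:postIndex]. +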
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, &QuantileSketchSample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSketch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuantileSketchSample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuantileSketchSample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuantileSketchSample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field F", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSketch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.F == nil { + m.F = &QuantileSketch{} + } + if err := m.F.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSketch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metric = append(m.Metric, &LabelPair{}) + if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSketch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *QuantileSketch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuantileSketch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuantileSketch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tdigest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSketch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TDigest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sketch = &QuantileSketch_Tdigest{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ddsketch", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSketch + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := make([]byte, postIndex-iNdEx) + copy(v, dAtA[iNdEx:postIndex]) + m.Sketch = &QuantileSketch_Ddsketch{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSketch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TDigest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TDigest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TDigest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Min = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType 
= %d for field Max", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Max = float64(math.Float64frombits(v)) + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Compression", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Compression = float64(math.Float64frombits(v)) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Processed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSketch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Processed = append(m.Processed, &TDigest_Centroid{}) + if err := m.Processed[len(m.Processed)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSketch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TDigest_Centroid) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Centroid: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Centroid: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Mean", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Mean = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Weight = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipSketch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CountMinSketch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CountMinSketch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CountMinSketch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Depth", wireType) + } + m.Depth = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Depth |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Width", wireType) + } + m.Width = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Width |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Counters = append(m.Counters, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthSketch + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Counters) == 0 { + m.Counters = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Counters = append(m.Counters, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipSketch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopK) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopK: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopK: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSketch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cms == nil { + m.Cms = &CountMinSketch{} + } + if err := m.Cms.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field List", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSketch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.List = append(m.List, &TopK_Pair{}) + if err := m.List[len(m.List)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hyperloglog", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSketch + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hyperloglog = append(m.Hyperloglog[:0], dAtA[iNdEx:postIndex]...) 
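+ // Normalize a present-but-empty bytes field to a non-nil empty slice, so it round-trips through marshal/unmarshal.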
+ if m.Hyperloglog == nil { + m.Hyperloglog = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSketch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopK_Pair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Pair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSketch + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Event = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSketch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopKMatrix) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopKMatrix: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopKMatrix: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b 
< 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSketch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &TopKMatrix_Vector{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSketch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopKMatrix_Vector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Topk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSketch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSketch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Topk == nil { + m.Topk = &TopK{} + } + if err := m.Topk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSketch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSketch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSketch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSketch(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSketch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSketch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return 
iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSketch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSketch + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthSketch + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSketch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSketch(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthSketch + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSketch = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSketch = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/grafana/loki/pkg/logproto/sketch.proto b/vendor/github.com/grafana/loki/pkg/logproto/sketch.proto new file mode 100644 index 00000000..d8ffeb01 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logproto/sketch.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; + +package logproto; + +import "pkg/logproto/logproto.proto"; + +option go_package = "github.com/grafana/loki/pkg/logproto"; + +message QuantileSketchMatrix { + repeated QuantileSketchVector values = 1; +} + +message QuantileSketchVector { + repeated QuantileSketchSample samples = 1; +} + +message QuantileSketchSample { + QuantileSketch f = 1; + int64 timestamp_ms = 2; + repeated LabelPair metric = 3; +} + +message QuantileSketch { + oneof sketch { + TDigest tdigest = 1; + bytes ddsketch = 2; // Use binary encoding for DDSketch. + } +} + +// "Large" bytes format from https://github.com/tdunning/t-digest +message TDigest { + double min = 1; + double max = 2; + double compression = 3; + + message Centroid { + double mean = 1; + double weight = 2; + } + repeated Centroid processed = 4; +} + +message CountMinSketch { + uint32 depth = 1; + uint32 width = 2; + + // counters is a matrix of depth * width. 
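+ // It is flattened into one repeated field of length depth*width.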
+ repeated uint32 counters = 3; +} + +message TopK { + CountMinSketch cms = 1; + + message Pair { + string event = 1; + uint32 count = 2; + } + repeated Pair list = 2; + + bytes hyperloglog = 3; +} + +message TopKMatrix { + message Vector { + TopK topk = 1; + int64 timestamp_ms = 2; + } + + repeated Vector values = 1; +} diff --git a/vendor/github.com/grafana/loki/pkg/logproto/timeseries.go b/vendor/github.com/grafana/loki/pkg/logproto/timeseries.go index 2bf4ae15..377b82bb 100644 --- a/vendor/github.com/grafana/loki/pkg/logproto/timeseries.go +++ b/vendor/github.com/grafana/loki/pkg/logproto/timeseries.go @@ -2,13 +2,7 @@ package logproto import ( "flag" - "fmt" - "io" - "strings" "sync" - "unsafe" - - "github.com/prometheus/prometheus/model/labels" ) var ( @@ -70,201 +64,6 @@ func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error { return p.TimeSeries.Unmarshal(dAtA) } -// LabelAdapter is a labels.Label that can be marshalled to/from protos. -type LabelAdapter labels.Label - -// Marshal implements proto.Marshaller. -func (bs *LabelAdapter) Marshal() ([]byte, error) { - size := bs.Size() - buf := make([]byte, size) - n, err := bs.MarshalToSizedBuffer(buf[:size]) - if err != nil { - return nil, err - } - return buf[:n], err -} - -func (bs *LabelAdapter) MarshalTo(dAtA []byte) (int, error) { - size := bs.Size() - return bs.MarshalToSizedBuffer(dAtA[:size]) -} - -// MarshalTo implements proto.Marshaller. -func (bs *LabelAdapter) MarshalToSizedBuffer(buf []byte) (n int, err error) { - ls := (*labels.Label)(bs) - i := len(buf) - if len(ls.Value) > 0 { - i -= len(ls.Value) - copy(buf[i:], ls.Value) - i = encodeVarintMetrics(buf, i, uint64(len(ls.Value))) - i-- - buf[i] = 0x12 - } - if len(ls.Name) > 0 { - i -= len(ls.Name) - copy(buf[i:], ls.Name) - i = encodeVarintMetrics(buf, i, uint64(len(ls.Name))) - i-- - buf[i] = 0xa - } - return len(buf) - i, nil -} - -// Unmarshal a LabelAdapter, implements proto.Unmarshaller. -// NB this is a copy of the autogenerated code to unmarshal a LabelPair, -// with the byte copying replaced with a yoloString. 
-func (bs *LabelAdapter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - bs.Name = yoloString(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - bs.Value = yoloString(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} - -func yoloString(buf []byte) string { - return *((*string)(unsafe.Pointer(&buf))) -} - -// Size implements proto.Sizer. -func (bs *LabelAdapter) Size() (n int) { - ls := (*labels.Label)(bs) - if bs == nil { - return 0 - } - var l int - _ = l - l = len(ls.Name) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - l = len(ls.Value) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} - -// Equal implements proto.Equaler. -func (bs *LabelAdapter) Equal(other LabelAdapter) bool { - return bs.Name == other.Name && bs.Value == other.Value -} - -// Compare implements proto.Comparer. -func (bs *LabelAdapter) Compare(other LabelAdapter) int { - if c := strings.Compare(bs.Name, other.Name); c != 0 { - return c - } - return strings.Compare(bs.Value, other.Value) -} - // PreallocTimeseriesSliceFromPool retrieves a slice of PreallocTimeseries from a sync.Pool. // ReuseSlice should be called once done. 
func PreallocTimeseriesSliceFromPool() []PreallocTimeseries { diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/drop_labels.go b/vendor/github.com/grafana/loki/pkg/logql/log/drop_labels.go new file mode 100644 index 00000000..7e6b5e0b --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/drop_labels.go @@ -0,0 +1,86 @@ +package log + +import ( + "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/loki/pkg/logqlmodel" +) + +type DropLabels struct { + dropLabels []DropLabel +} + +type DropLabel struct { + Matcher *labels.Matcher + Name string +} + +func NewDropLabel(matcher *labels.Matcher, name string) DropLabel { + return DropLabel{ + Matcher: matcher, + Name: name, + } +} + +func NewDropLabels(dl []DropLabel) *DropLabels { + return &DropLabels{dropLabels: dl} +} + +func (dl *DropLabels) Process(_ int64, line []byte, lbls *LabelsBuilder) ([]byte, bool) { + for _, dropLabel := range dl.dropLabels { + if dropLabel.Matcher != nil { + dropLabelMatches(dropLabel.Matcher, lbls) + continue + } + name := dropLabel.Name + dropLabelNames(name, lbls) + } + return line, true +} + +func (dl *DropLabels) RequiredLabelNames() []string { return []string{} } + +func isErrorLabel(name string) bool { + return name == logqlmodel.ErrorLabel +} + +func isErrorDetailsLabel(name string) bool { + return name == logqlmodel.ErrorDetailsLabel +} + +func dropLabelNames(name string, lbls *LabelsBuilder) { + if isErrorLabel(name) { + lbls.ResetError() + return + } + if isErrorDetailsLabel(name) { + lbls.ResetErrorDetails() + return + } + if _, ok := lbls.Get(name); ok { + lbls.Del(name) + } +} + +func dropLabelMatches(matcher *labels.Matcher, lbls *LabelsBuilder) { + var value string + name := matcher.Name + if isErrorLabel(name) { + value = lbls.GetErr() + if matcher.Matches(value) { + lbls.ResetError() + } + return + } + if isErrorDetailsLabel(name) { + value = lbls.GetErrorDetails() + if matcher.Matches(value) { + lbls.ResetErrorDetails() + } + return + } + value, _ = lbls.Get(name) + if matcher.Matches(value) { + lbls.Del(name) + } +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/error.go b/vendor/github.com/grafana/loki/pkg/logql/log/error.go new file mode 100644 index 00000000..bab4015f --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/error.go @@ -0,0 +1,10 @@ +package log + +var ( + // Possible errors thrown by a log pipeline. + errJSON = "JSONParserErr" + errLogfmt = "LogfmtParserErr" + errSampleExtraction = "SampleExtractionErr" + errLabelFilter = "LabelFilterErr" + errTemplateFormat = "TemplateFormatErr" +) diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/filter.go b/vendor/github.com/grafana/loki/pkg/logql/log/filter.go new file mode 100644 index 00000000..7860ad88 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/filter.go @@ -0,0 +1,625 @@ +package log + +import ( + "bytes" + "fmt" + "unicode" + "unicode/utf8" + + "github.com/grafana/loki/pkg/util" + + "github.com/grafana/regexp" + "github.com/grafana/regexp/syntax" + + "github.com/prometheus/prometheus/model/labels" +) + +// Filterer is a interface to filter log lines. 
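The `DropLabels` stage added above drops a label either unconditionally by name, or only when a matcher matches its current value. A rough standalone sketch of the matcher path using the same Prometheus `labels.Matcher` API (the plain map stands in for Loki's `LabelsBuilder`):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	lbls := map[string]string{"level": "debug", "pod": "api-0"}

	// Mirrors dropLabelMatches (minus the __error__ special cases):
	// drop "level" only when its current value matches the regex.
	m := labels.MustNewMatcher(labels.MatchRegexp, "level", "debug|trace")
	if v, ok := lbls[m.Name]; ok && m.Matches(v) {
		delete(lbls, m.Name)
	}
	fmt.Println(lbls) // map[pod:api-0]
}
```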
+type Filterer interface { + Filter(line []byte) bool + ToStage() Stage +} + +// LineFilterFunc is a syntax sugar for creating line filter from a function +type FiltererFunc func(line []byte) bool + +func (f FiltererFunc) Filter(line []byte) bool { + return f(line) +} + +type trueFilter struct{} + +func (trueFilter) Filter(_ []byte) bool { return true } +func (trueFilter) ToStage() Stage { return NoopStage } + +// TrueFilter is a filter that returns and matches all log lines whatever their content. +var TrueFilter = trueFilter{} + +type existsFilter struct{} + +func (e existsFilter) Filter(line []byte) bool { + return len(line) > 0 +} + +func (e existsFilter) ToStage() Stage { + return StageFunc{ + process: func(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) { + return line, e.Filter(line) + }, + } +} + +// ExistsFilter is a filter that returns and matches when a line has any characters. +var ExistsFilter = existsFilter{} + +type notFilter struct { + Filterer +} + +func (n notFilter) Filter(line []byte) bool { + return !n.Filterer.Filter(line) +} + +func (n notFilter) ToStage() Stage { + return StageFunc{ + process: func(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) { + return line, n.Filter(line) + }, + } +} + +// NewNotFilter creates a new filter which matches only if the base filter doesn't match. +// If the base filter is a `or` it will recursively simplify with `and` operations. +func NewNotFilter(base Filterer) Filterer { + // not(a|b) = not(a) and not(b) , and operation can't benefit from this optimization because both legs always needs to be executed. + if or, ok := base.(orFilter); ok { + return NewAndFilter(NewNotFilter(or.left), NewNotFilter(or.right)) + } + return notFilter{Filterer: base} +} + +type andFilter struct { + left Filterer + right Filterer +} + +// NewAndFilter creates a new filter which matches only if left and right matches. +func NewAndFilter(left Filterer, right Filterer) Filterer { + // Make sure we take care of panics in case a nil or noop filter is passed. + if right == nil || right == TrueFilter { + return left + } + + if left == nil || left == TrueFilter { + return right + } + + return andFilter{ + left: left, + right: right, + } +} + +func (a andFilter) Filter(line []byte) bool { + return a.left.Filter(line) && a.right.Filter(line) +} + +func (a andFilter) ToStage() Stage { + return StageFunc{ + process: func(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) { + return line, a.Filter(line) + }, + } +} + +type andFilters struct { + filters []Filterer +} + +// NewAndFilters creates a new filter which matches only if all filters match +func NewAndFilters(filters []Filterer) Filterer { + var containsFilterAcc *containsAllFilter + regexpFilters := make([]Filterer, 0) + n := 0 + for _, filter := range filters { + // Make sure we take care of panics in case a nil or noop filter is passed. + if !(filter == nil || filter == TrueFilter) { + switch c := filter.(type) { + case *containsFilter: + // Start accumulating contains filters. + if containsFilterAcc == nil { + containsFilterAcc = &containsAllFilter{} + } + + // Join all contain filters. + containsFilterAcc.Add(*c) + case regexpFilter: + regexpFilters = append(regexpFilters, c) + + default: + // Finish accumulating contains filters. 
+ if containsFilterAcc != nil { + filters[n] = containsFilterAcc + n++ + containsFilterAcc = nil + } + + // Keep filter + filters[n] = filter + n++ + } + } + } + filters = filters[:n] + + if containsFilterAcc != nil { + filters = append(filters, containsFilterAcc) + } + + // Push regex filters to end + if len(regexpFilters) > 0 { + filters = append(filters, regexpFilters...) + } + + if len(filters) == 0 { + return TrueFilter + } else if len(filters) == 1 { + return filters[0] + } + + return andFilters{ + filters: filters, + } +} + +func (a andFilters) Filter(line []byte) bool { + for _, filter := range a.filters { + if !filter.Filter(line) { + return false + } + } + return true +} + +func (a andFilters) ToStage() Stage { + return StageFunc{ + process: func(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) { + return line, a.Filter(line) + }, + } +} + +type orFilter struct { + left Filterer + right Filterer +} + +// newOrFilter creates a new filter which matches only if left or right matches. +func newOrFilter(left Filterer, right Filterer) Filterer { + if left == nil || left == TrueFilter { + return right + } + + if right == nil || right == TrueFilter { + return left + } + + return orFilter{ + left: left, + right: right, + } +} + +// ChainOrFilter is a syntax sugar to chain multiple `or` filters. (1 or many) +func ChainOrFilter(curr, new Filterer) Filterer { + if curr == nil { + return new + } + return newOrFilter(curr, new) +} + +func (a orFilter) Filter(line []byte) bool { + return a.left.Filter(line) || a.right.Filter(line) +} + +func (a orFilter) ToStage() Stage { + return StageFunc{ + process: func(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) { + return line, a.Filter(line) + }, + } +} + +type regexpFilter struct { + *regexp.Regexp + + orig string +} + +// newRegexpFilter creates a new line filter for a given regexp. +// If match is false the filter is the negation of the regexp. 
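Note how `NewNotFilter` applies De Morgan's law: a negated alternation becomes an AND of negations, so both cheap legs run instead of one regexp. A usage sketch composing the combinators above with `NewFilter` (defined further down in this file); the vendored import path is an assumption about the consumer module:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"

	log "github.com/grafana/loki/pkg/logql/log"
)

func main() {
	// LogQL `|= "error" != "timeout"` composed from the filters above.
	hasError, _ := log.NewFilter("error", labels.MatchEqual)
	noTimeout, _ := log.NewFilter("timeout", labels.MatchNotEqual)
	f := log.NewAndFilter(hasError, noTimeout)

	fmt.Println(f.Filter([]byte("level=error msg=disk full"))) // true
	fmt.Println(f.Filter([]byte("level=error msg=timeout")))   // false
}
```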
+func newRegexpFilter(re string, orig string, match bool) (Filterer, error) { + reg, err := regexp.Compile(re) + if err != nil { + return nil, err + } + f := regexpFilter{Regexp: reg, orig: orig} + if match { + return f, nil + } + return NewNotFilter(f), nil +} + +func (r regexpFilter) Filter(line []byte) bool { + return r.Match(line) +} + +func (r regexpFilter) ToStage() Stage { + return StageFunc{ + process: func(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) { + return line, r.Filter(line) + }, + } +} + +func (r regexpFilter) String() string { + return r.orig +} + +type equalFilter struct { + match []byte + caseInsensitive bool +} + +func (l equalFilter) Filter(line []byte) bool { + if len(l.match) != len(line) { + return false + } + + return contains(line, l.match, l.caseInsensitive) +} + +func (l equalFilter) ToStage() Stage { + return StageFunc{ + process: func(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) { + return line, l.Filter(line) + }, + } +} + +func (l equalFilter) String() string { + return string(l.match) +} + +func newEqualFilter(match []byte, caseInsensitive bool) Filterer { + return equalFilter{match, caseInsensitive} +} + +type containsFilter struct { + match []byte + caseInsensitive bool +} + +func (l *containsFilter) Filter(line []byte) bool { + return contains(line, l.match, l.caseInsensitive) +} + +func contains(line, substr []byte, caseInsensitive bool) bool { + if !caseInsensitive { + return bytes.Contains(line, substr) + } + return containsLower(line, substr) +} + +func containsLower(line, substr []byte) bool { + if len(substr) == 0 { + return true + } + if len(substr) > len(line) { + return false + } + j := 0 + for len(line) > 0 { + // ascii fast case + if c := line[0]; c < utf8.RuneSelf && substr[j] < utf8.RuneSelf { + if c == substr[j] || c+'a'-'A' == substr[j] || c == substr[j]+'a'-'A' { + j++ + if j == len(substr) { + return true + } + line = line[1:] + continue + } + line = line[1:] + j = 0 + continue + } + // unicode slow case + lr, lwid := utf8.DecodeRune(line) + mr, mwid := utf8.DecodeRune(substr[j:]) + if lr == mr || mr == unicode.To(unicode.LowerCase, lr) { + j += mwid + if j == len(substr) { + return true + } + line = line[lwid:] + continue + } + line = line[lwid:] + j = 0 + } + return false +} + +func (l containsFilter) ToStage() Stage { + return StageFunc{ + process: func(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) { + return line, l.Filter(line) + }, + } +} + +func (l containsFilter) String() string { + return string(l.match) +} + +// newContainsFilter creates a contains filter that checks if a log line contains a match. 
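`containsLower` is why case-insensitive line filters stay allocation-free: the needle is lowercased once at construction and each line is compared in place, with an ASCII fast path before falling back to rune decoding. The naive equivalent below computes the same answer but allocates a lowered copy of every line:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	line := []byte("LEVEL=Error msg=Disk full")
	needle := []byte("error") // lowercased once, as newContainsFilter does

	// Semantically what containsLower computes, but bytes.ToLower
	// allocates a full copy of the line on every call:
	fmt.Println(bytes.Contains(bytes.ToLower(line), needle)) // true
}
```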
+func newContainsFilter(match []byte, caseInsensitive bool) Filterer { + if len(match) == 0 { + return TrueFilter + } + if caseInsensitive { + match = bytes.ToLower(match) + } + return &containsFilter{ + match: match, + caseInsensitive: caseInsensitive, + } +} + +type containsAllFilter struct { + matches []containsFilter +} + +func (f *containsAllFilter) Add(filter containsFilter) { + f.matches = append(f.matches, filter) +} + +func (f *containsAllFilter) Empty() bool { + return len(f.matches) == 0 +} + +func (f containsAllFilter) Filter(line []byte) bool { + for _, m := range f.matches { + if !contains(line, m.match, m.caseInsensitive) { + return false + } + } + return true +} + +func (f containsAllFilter) ToStage() Stage { + return StageFunc{ + process: func(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) { + return line, f.Filter(line) + }, + } +} + +// NewFilter creates a new line filter from a match string and type. +func NewFilter(match string, mt labels.MatchType) (Filterer, error) { + switch mt { + case labels.MatchRegexp: + return parseRegexpFilter(match, true, false) + case labels.MatchNotRegexp: + return parseRegexpFilter(match, false, false) + case labels.MatchEqual: + return newContainsFilter([]byte(match), false), nil + case labels.MatchNotEqual: + return NewNotFilter(newContainsFilter([]byte(match), false)), nil + default: + return nil, fmt.Errorf("unknown matcher: %v", match) + } +} + +// NewLabelFilter creates a new filter that has label regex semantics +func NewLabelFilter(match string, mt labels.MatchType) (Filterer, error) { + switch mt { + case labels.MatchRegexp: + return parseRegexpFilter(match, true, true) + case labels.MatchNotRegexp: + return parseRegexpFilter(match, false, true) + case labels.MatchEqual: + return newEqualFilter([]byte(match), false), nil + case labels.MatchNotEqual: + return NewNotFilter(newEqualFilter([]byte(match), false)), nil + default: + return nil, fmt.Errorf("unknown matcher: %v", match) + } +} + +// parseRegexpFilter parses a regexp and attempt to simplify it with only literal filters. +// If not possible it will returns the original regexp filter. +func parseRegexpFilter(re string, match bool, isLabel bool) (Filterer, error) { + reg, err := syntax.Parse(re, syntax.Perl) + if err != nil { + return nil, err + } + reg = reg.Simplify() + + // attempt to improve regex with tricks + f, ok := simplify(reg, isLabel) + if !ok { + util.AllNonGreedy(reg) + regex := reg.String() + if isLabel { + // label regexes are anchored to + // the beginning and ending of lines + regex = "^(?:" + regex + ")$" + } + return newRegexpFilter(regex, re, match) + } + if match { + return f, nil + } + return NewNotFilter(f), nil +} + +// simplify a regexp expression by replacing it, when possible, with a succession of literal filters. 
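`NewFilter` and `NewLabelFilter` differ only in matching semantics: line filters use substring (contains) matching, label filters match the full string. A sketch under the same vendored-path assumption as above:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"

	log "github.com/grafana/loki/pkg/logql/log"
)

func main() {
	lineF, _ := log.NewFilter("err", labels.MatchEqual)       // contains
	labelF, _ := log.NewLabelFilter("err", labels.MatchEqual) // exact match

	fmt.Println(lineF.Filter([]byte("level=error")))  // true  ("err" is a substring)
	fmt.Println(labelF.Filter([]byte("level=error"))) // false (not exactly "err")
	fmt.Println(labelF.Filter([]byte("err")))         // true
}
```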
+// For example `(foo|bar)` will be replaced by `containsFilter(foo) or containsFilter(bar)` +func simplify(reg *syntax.Regexp, isLabel bool) (Filterer, bool) { + switch reg.Op { + case syntax.OpAlternate: + return simplifyAlternate(reg, isLabel) + case syntax.OpConcat: + return simplifyConcat(reg, nil) + case syntax.OpCapture: + util.ClearCapture(reg) + return simplify(reg, isLabel) + case syntax.OpLiteral: + if isLabel { + return newEqualFilter([]byte(string(reg.Rune)), util.IsCaseInsensitive(reg)), true + } + return newContainsFilter([]byte(string(reg.Rune)), util.IsCaseInsensitive(reg)), true + case syntax.OpStar: + if reg.Sub[0].Op == syntax.OpAnyCharNotNL { + return TrueFilter, true + } + case syntax.OpPlus: + if len(reg.Sub) == 1 && reg.Sub[0].Op == syntax.OpAnyCharNotNL { // simplify ".+" + return ExistsFilter, true + } + case syntax.OpEmptyMatch: + return TrueFilter, true + } + return nil, false +} + +// simplifyAlternate simplifies, when possible, alternate regexp expressions such as: +// (foo|bar) or (foo|(bar|buzz)). +func simplifyAlternate(reg *syntax.Regexp, isLabel bool) (Filterer, bool) { + util.ClearCapture(reg.Sub...) + // attempt to simplify the first leg + f, ok := simplify(reg.Sub[0], isLabel) + if !ok { + return nil, false + } + // merge the rest of the legs + for i := 1; i < len(reg.Sub); i++ { + f2, ok := simplify(reg.Sub[i], isLabel) + if !ok { + return nil, false + } + f = newOrFilter(f, f2) + } + return f, true +} + +// simplifyConcat attempt to simplify concat operations. +// Concat operations are either literal and star such as foo.* .*foo.* .*foo +// which is a literalFilter. +// Or a literal and alternates operation (see simplifyConcatAlternate), which represent a multiplication of alternates. +// Anything else is rejected. +func simplifyConcat(reg *syntax.Regexp, baseLiteral []byte) (Filterer, bool) { + util.ClearCapture(reg.Sub...) + // remove empty match as we don't need them for filtering + i := 0 + for _, r := range reg.Sub { + if r.Op == syntax.OpEmptyMatch { + continue + } + reg.Sub[i] = r + i++ + } + reg.Sub = reg.Sub[:i] + // we support only simplication of concat operation with 3 sub expressions. + // for instance .*foo.*bar contains 4 subs (.*+foo+.*+bar) and can't be simplified. + if len(reg.Sub) > 3 { + return nil, false + } + + var curr Filterer + var ok bool + literals := 0 + var baseLiteralIsCaseInsensitive bool + for _, sub := range reg.Sub { + if sub.Op == syntax.OpLiteral { + // only one literal is allowed. + if literals != 0 { + return nil, false + } + literals++ + baseLiteral = append(baseLiteral, []byte(string(sub.Rune))...) + baseLiteralIsCaseInsensitive = util.IsCaseInsensitive(sub) + continue + } + // if we have an alternate we must also have a base literal to apply the concatenation with. + if sub.Op == syntax.OpAlternate && baseLiteral != nil { + if curr, ok = simplifyConcatAlternate(sub, baseLiteral, curr, baseLiteralIsCaseInsensitive); !ok { + return nil, false + } + continue + } + if sub.Op == syntax.OpStar && sub.Sub[0].Op == syntax.OpAnyCharNotNL { + continue + } + return nil, false + } + + // if we have a filter from concat alternates. + if curr != nil { + return curr, true + } + + // if we have only a concat with literals. + if baseLiteral != nil { + return newContainsFilter(baseLiteral, baseLiteralIsCaseInsensitive), true + } + + return nil, false +} + +// simplifyConcatAlternate simplifies concat alternate operations. +// A concat alternate is found when a concat operation has a sub alternate and is preceded by a literal. 
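The payoff of `simplifyConcat` and `simplifyConcatAlternate` (explained next) is that factored alternations never reach the regexp engine. For `bar|b|buzz`, the shared prefix the parser factors out is multiplied back into three contains filters; a sketch via the exported entry point:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"

	log "github.com/grafana/loki/pkg/logql/log"
)

func main() {
	// `|~ "bar|b|buzz"` — parsed as b(ar|(?:)|uzz), simplified to
	// contains(bar) or contains(b) or contains(buzz).
	f, err := log.NewFilter("bar|b|buzz", labels.MatchRegexp)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Filter([]byte("fizzbuzz"))) // true
	fmt.Println(f.Filter([]byte("fizz")))     // false
}
```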
+// For instance bar|b|buzz is expressed as b(ar|(?:)|uzz) => b concat alternate(ar,(?:),uzz). +// (?:) being an OpEmptyMatch and b being the literal to concat all alternates (ar,(?:),uzz) with. +func simplifyConcatAlternate(reg *syntax.Regexp, literal []byte, curr Filterer, baseLiteralIsCaseInsensitive bool) (Filterer, bool) { + for _, alt := range reg.Sub { + // we should not consider the case where baseLiteral is not marked as case insensitive + // and alternate expression is marked as case insensitive. For example, for the original expression + // f|f(?i)oo the extracted expression would be "f (?:)|(?i:OO)" i.e. f with empty match + // and fOO. For fOO, we can't initialize containsFilter with caseInsensitve variable as either true or false + isAltCaseInsensitive := util.IsCaseInsensitive(alt) + if !baseLiteralIsCaseInsensitive && isAltCaseInsensitive { + return nil, false + } + switch alt.Op { + case syntax.OpEmptyMatch: + curr = ChainOrFilter(curr, newContainsFilter(literal, baseLiteralIsCaseInsensitive)) + case syntax.OpLiteral: + // concat the root literal with the alternate one. + altBytes := []byte(string(alt.Rune)) + altLiteral := make([]byte, 0, len(literal)+len(altBytes)) + altLiteral = append(altLiteral, literal...) + altLiteral = append(altLiteral, altBytes...) + curr = ChainOrFilter(curr, newContainsFilter(altLiteral, baseLiteralIsCaseInsensitive)) + case syntax.OpConcat: + f, ok := simplifyConcat(alt, literal) + if !ok { + return nil, false + } + curr = ChainOrFilter(curr, f) + case syntax.OpStar: + if alt.Sub[0].Op != syntax.OpAnyCharNotNL { + return nil, false + } + curr = ChainOrFilter(curr, newContainsFilter(literal, baseLiteralIsCaseInsensitive)) + default: + return nil, false + } + } + if curr != nil { + return curr, true + } + return nil, false +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/fmt.go b/vendor/github.com/grafana/loki/pkg/logql/log/fmt.go new file mode 100644 index 00000000..e28f5a11 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/fmt.go @@ -0,0 +1,502 @@ +package log + +import ( + "bytes" + "fmt" + "net/url" + "strconv" + "strings" + "text/template" + "text/template/parse" + "time" + + "github.com/Masterminds/sprig/v3" + "github.com/grafana/regexp" + + "github.com/grafana/loki/pkg/logqlmodel" +) + +const ( + functionLineName = "__line__" + functionTimestampName = "__timestamp__" +) + +var ( + _ Stage = &LineFormatter{} + _ Stage = &LabelsFormatter{} + + // Available map of functions for the text template engine. + functionMap = template.FuncMap{ + // olds functions deprecated. 
+ "ToLower": strings.ToLower, + "ToUpper": strings.ToUpper, + "Replace": strings.Replace, + "Trim": strings.Trim, + "TrimLeft": strings.TrimLeft, + "TrimRight": strings.TrimRight, + "TrimPrefix": strings.TrimPrefix, + "TrimSuffix": strings.TrimSuffix, + "TrimSpace": strings.TrimSpace, + "regexReplaceAll": func(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil + }, + "regexReplaceAllLiteral": func(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil + }, + "count": func(regexsubstr string, s string) (int, error) { + r, err := regexp.Compile(regexsubstr) + if err != nil { + return 0, err + } + matches := r.FindAllStringIndex(s, -1) + return len(matches), nil + }, + "urldecode": url.QueryUnescape, + "urlencode": url.QueryEscape, + "bytes": convertBytes, + "duration": convertDuration, + "duration_seconds": convertDuration, + "unixEpochMillis": unixEpochMillis, + "unixEpochNanos": unixEpochNanos, + "toDateInZone": toDateInZone, + "unixToTime": unixToTime, + "alignLeft": alignLeft, + "alignRight": alignRight, + } + + // sprig template functions + templateFunctions = []string{ + "b64enc", + "b64dec", + "lower", + "upper", + "title", + "trunc", + "substr", + "contains", + "hasPrefix", + "hasSuffix", + "indent", + "nindent", + "replace", + "repeat", + "trim", + "trimAll", + "trimSuffix", + "trimPrefix", + "int", + "float64", + "add", + "sub", + "mul", + "div", + "mod", + "addf", + "subf", + "mulf", + "divf", + "max", + "min", + "maxf", + "minf", + "ceil", + "floor", + "round", + "fromJson", + "date", + "toDate", + "now", + "unixEpoch", + "default", + } +) + +func addLineAndTimestampFunctions(currLine func() string, currTimestamp func() int64) map[string]interface{} { + functions := make(map[string]interface{}, len(functionMap)+2) + for k, v := range functionMap { + functions[k] = v + } + functions[functionLineName] = func() string { + return currLine() + } + functions[functionTimestampName] = func() time.Time { + return time.Unix(0, currTimestamp()) + } + return functions +} + +// toEpoch converts a string with Unix time to an time Value +func unixToTime(epoch string) (time.Time, error) { + var ct time.Time + l := len(epoch) + i, err := strconv.ParseInt(epoch, 10, 64) + if err != nil { + return ct, fmt.Errorf("unable to parse time '%v': %w", epoch, err) + } + switch l { + case 5: + // days 19373 + return time.Unix(i*86400, 0), nil + case 10: + // seconds 1673798889 + return time.Unix(i, 0), nil + case 13: + // milliseconds 1673798889902 + return time.Unix(0, i*1000*1000), nil + case 16: + // microseconds 1673798889902000 + return time.Unix(0, i*1000), nil + case 19: + // nanoseconds 1673798889902000000 + return time.Unix(0, i), nil + default: + return ct, fmt.Errorf("unable to parse time '%v': %w", epoch, err) + } +} + +func unixEpochMillis(date time.Time) string { + return strconv.FormatInt(date.UnixMilli(), 10) +} + +func unixEpochNanos(date time.Time) string { + return strconv.FormatInt(date.UnixNano(), 10) +} + +func toDateInZone(fmt, zone, str string) time.Time { + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + t, _ := time.ParseInLocation(fmt, str, loc) + return t +} + +func init() { + sprigFuncMap := sprig.GenericFuncMap() + for _, v := range templateFunctions { + if function, ok := sprigFuncMap[v]; ok { 
+ functionMap[v] = function + } + } +} + +type LineFormatter struct { + *template.Template + buf *bytes.Buffer + + currentLine []byte + currentTs int64 +} + +// NewFormatter creates a new log line formatter from a given text template. +func NewFormatter(tmpl string) (*LineFormatter, error) { + lf := &LineFormatter{ + buf: bytes.NewBuffer(make([]byte, 4096)), + } + + functions := addLineAndTimestampFunctions(func() string { + return unsafeGetString(lf.currentLine) + }, func() int64 { + return lf.currentTs + }) + + t, err := template.New("line").Option("missingkey=zero").Funcs(functions).Parse(tmpl) + if err != nil { + return nil, fmt.Errorf("invalid line template: %w", err) + } + lf.Template = t + return lf, nil +} + +func (lf *LineFormatter) Process(ts int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + lf.buf.Reset() + lf.currentLine = line + lf.currentTs = ts + + if err := lf.Template.Execute(lf.buf, lbs.Map()); err != nil { + lbs.SetErr(errTemplateFormat) + lbs.SetErrorDetails(err.Error()) + return line, true + } + return lf.buf.Bytes(), true +} + +func (lf *LineFormatter) RequiredLabelNames() []string { + return uniqueString(listNodeFields([]parse.Node{lf.Root})) +} + +func listNodeFields(nodes []parse.Node) []string { + var res []string + for _, node := range nodes { + switch node.Type() { + case parse.NodePipe: + res = append(res, listNodeFieldsFromPipe(node.(*parse.PipeNode))...) + case parse.NodeAction: + res = append(res, listNodeFieldsFromPipe(node.(*parse.ActionNode).Pipe)...) + case parse.NodeList: + res = append(res, listNodeFields(node.(*parse.ListNode).Nodes)...) + case parse.NodeCommand: + res = append(res, listNodeFields(node.(*parse.CommandNode).Args)...) + case parse.NodeIf, parse.NodeWith, parse.NodeRange: + res = append(res, listNodeFieldsFromBranch(node)...) + case parse.NodeField: + res = append(res, node.(*parse.FieldNode).Ident...) + } + } + return res +} + +func listNodeFieldsFromBranch(node parse.Node) []string { + var res []string + var b parse.BranchNode + switch node.Type() { + case parse.NodeIf: + b = node.(*parse.IfNode).BranchNode + case parse.NodeWith: + b = node.(*parse.WithNode).BranchNode + case parse.NodeRange: + b = node.(*parse.RangeNode).BranchNode + default: + return res + } + if b.Pipe != nil { + res = append(res, listNodeFieldsFromPipe(b.Pipe)...) + } + if b.List != nil { + res = append(res, listNodeFields(b.List.Nodes)...) + } + if b.ElseList != nil { + res = append(res, listNodeFields(b.ElseList.Nodes)...) + } + return res +} + +func listNodeFieldsFromPipe(p *parse.PipeNode) []string { + var res []string + for _, c := range p.Cmds { + res = append(res, listNodeFields(c.Args)...) + } + return res +} + +// LabelFmt is a configuration struct for formatting a label. +type LabelFmt struct { + Name string + Value string + + Rename bool +} + +// NewRenameLabelFmt creates a configuration to rename a label. +func NewRenameLabelFmt(dst, target string) LabelFmt { + return LabelFmt{ + Name: dst, + Rename: true, + Value: target, + } +} + +// NewTemplateLabelFmt creates a configuration to format a label using text template. +func NewTemplateLabelFmt(dst, template string) LabelFmt { + return LabelFmt{ + Name: dst, + Rename: false, + Value: template, + } +} + +type labelFormatter struct { + tmpl *template.Template + LabelFmt +} + +type LabelsFormatter struct { + formats []labelFormatter + buf *bytes.Buffer + + currentLine []byte + currentTs int64 +} + +// NewLabelsFormatter creates a new formatter that can format multiple labels at once. 
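`NewFormatter` above is a thin wrapper over Go's `text/template` with `missingkey=zero` and the custom `functionMap`. A standalone sketch of the same construction, with a plain map standing in for `lbs.Map()`:

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// Same construction NewFormatter uses: custom functions plus
	// missingkey=zero so absent labels render as empty values.
	funcs := template.FuncMap{"ToUpper": strings.ToUpper}
	t, err := template.New("line").
		Option("missingkey=zero").
		Funcs(funcs).
		Parse(`{{ .level | ToUpper }}: {{ .msg }}`)
	if err != nil {
		panic(err)
	}
	lbls := map[string]string{"level": "error", "msg": "disk full"}
	if err := t.Execute(os.Stdout, lbls); err != nil {
		panic(err)
	}
	// Output: ERROR: disk full
}
```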
+// Either by renaming or using text template. +// It is not allowed to reformat the same label twice within the same formatter. +func NewLabelsFormatter(fmts []LabelFmt) (*LabelsFormatter, error) { + if err := validate(fmts); err != nil { + return nil, err + } + formats := make([]labelFormatter, 0, len(fmts)) + + lf := &LabelsFormatter{ + buf: bytes.NewBuffer(make([]byte, 1024)), + } + + functions := addLineAndTimestampFunctions(func() string { + return unsafeGetString(lf.currentLine) + }, func() int64 { + return lf.currentTs + }) + + for _, fm := range fmts { + toAdd := labelFormatter{LabelFmt: fm} + if !fm.Rename { + t, err := template.New("label").Option("missingkey=zero").Funcs(functions).Parse(fm.Value) + if err != nil { + return nil, fmt.Errorf("invalid template for label '%s': %s", fm.Name, err) + } + toAdd.tmpl = t + } + formats = append(formats, toAdd) + } + lf.formats = formats + return lf, nil +} + +func validate(fmts []LabelFmt) error { + // it would be too confusing to rename and change the same label value. + // To avoid confusion we allow to have a label name only once per stage. + uniqueLabelName := map[string]struct{}{} + for _, f := range fmts { + if f.Name == logqlmodel.ErrorLabel { + return fmt.Errorf("%s cannot be formatted", f.Name) + } + if _, ok := uniqueLabelName[f.Name]; ok { + return fmt.Errorf("multiple label name '%s' not allowed in a single format operation", f.Name) + } + uniqueLabelName[f.Name] = struct{}{} + } + return nil +} + +func (lf *LabelsFormatter) Process(ts int64, l []byte, lbs *LabelsBuilder) ([]byte, bool) { + lf.currentLine = l + lf.currentTs = ts + + var data interface{} + for _, f := range lf.formats { + if f.Rename { + v, category, ok := lbs.GetWithCategory(f.Value) + if ok { + lbs.Set(category, f.Name, v) + lbs.Del(f.Value) + } + continue + } + lf.buf.Reset() + if data == nil { + data = lbs.Map() + } + if err := f.tmpl.Execute(lf.buf, data); err != nil { + lbs.SetErr(errTemplateFormat) + lbs.SetErrorDetails(err.Error()) + continue + } + lbs.Set(ParsedLabel, f.Name, lf.buf.String()) + } + return l, true +} + +func (lf *LabelsFormatter) RequiredLabelNames() []string { + var names []string + for _, fm := range lf.formats { + if fm.Rename { + names = append(names, fm.Value) + continue + } + names = append(names, listNodeFields([]parse.Node{fm.tmpl.Root})...) 
+ } + return uniqueString(names) +} + +func trunc(c int, s string) string { + runes := []rune(s) + l := len(runes) + if c < 0 && l+c > 0 { + return string(runes[l+c:]) + } + if c >= 0 && l > c { + return string(runes[:c]) + } + return s +} + +func alignLeft(count int, src string) string { + runes := []rune(src) + l := len(runes) + if count < 0 || count == l { + return src + } + pad := count - l + if pad > 0 { + return src + strings.Repeat(" ", pad) + } + return string(runes[:count]) +} + +func alignRight(count int, src string) string { + runes := []rune(src) + l := len(runes) + if count < 0 || count == l { + return src + } + pad := count - l + if pad > 0 { + return strings.Repeat(" ", pad) + src + } + return string(runes[l-count:]) +} + +type Decolorizer struct{} + +// RegExp to select ANSI characters courtesy of https://github.com/acarl005/stripansi +const ansiPattern = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" + +var ansiRegex = regexp.MustCompile(ansiPattern) + +func NewDecolorizer() (*Decolorizer, error) { + return &Decolorizer{}, nil +} + +func (Decolorizer) Process(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) { + return ansiRegex.ReplaceAll(line, []byte{}), true +} +func (Decolorizer) RequiredLabelNames() []string { return []string{} } + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. +func substring(start, end int, s string) string { + runes := []rune(s) + l := len(runes) + if end > l { + end = l + } + if start > l { + start = l + } + if start < 0 { + if end < 0 { + return "" + } + return string(runes[:end]) + } + if end < 0 { + return string(runes[start:]) + } + if start > end { + return "" + } + return string(runes[start:end]) +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/ip.go b/vendor/github.com/grafana/loki/pkg/logql/log/ip.go new file mode 100644 index 00000000..1508432d --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/ip.go @@ -0,0 +1,299 @@ +package log + +import ( + "errors" + "fmt" + "net/netip" + "unicode" + + "github.com/prometheus/prometheus/model/labels" + "go4.org/netipx" +) + +var ( + ErrIPFilterInvalidPattern = errors.New("ip: invalid pattern") + ErrIPFilterInvalidOperation = errors.New("ip: invalid operation") +) + +type IPMatchType int + +const ( + IPv4Charset = "0123456789." + IPv6Charset = "0123456789abcdefABCDEF:." +) + +// Should be one of the netip.Addr, netip.Prefix, netipx.IPRange. +type IPMatcher interface{} + +type IPLineFilter struct { + ip *ipFilter + ty labels.MatchType +} + +// NewIPLineFilter is used to construct ip filter as a `LineFilter` +func NewIPLineFilter(pattern string, ty labels.MatchType) (*IPLineFilter, error) { + // check if `ty` supported in ip matcher. + switch ty { + case labels.MatchEqual, labels.MatchNotEqual: + default: + return nil, ErrIPFilterInvalidOperation + } + + ip, err := newIPFilter(pattern) + if err != nil { + return nil, err + } + return &IPLineFilter{ + ip: ip, + ty: ty, + }, nil +} + +// Filter implement `Filterer` interface. Used by `LineFilter` +func (f *IPLineFilter) Filter(line []byte) bool { + return f.filterTy(line, f.ty) +} + +// ToStage implements `Filterer` interface. 
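The `Decolorizer` stage above is just a regexp substitution; the same pattern works with the standard library engine (the vendored code uses the grafana/regexp fork):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same ANSI pattern the Decolorizer compiles (via stripansi).
const ansiPattern = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))"

var ansiRegex = regexp.MustCompile(ansiPattern)

func main() {
	line := []byte("\x1b[31merror\x1b[0m: disk full")
	fmt.Printf("%s\n", ansiRegex.ReplaceAll(line, []byte{}))
	// error: disk full
}
```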
+func (f *IPLineFilter) ToStage() Stage { + return f +} + +// `Process` implements `Stage` interface +func (f *IPLineFilter) Process(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) { + return line, f.filterTy(line, f.ty) +} + +// `RequiredLabelNames` implements `Stage` interface +func (f *IPLineFilter) RequiredLabelNames() []string { + return []string{} // empty for line filter +} + +func (f *IPLineFilter) filterTy(line []byte, ty labels.MatchType) bool { + if ty == labels.MatchNotEqual { + return !f.ip.filter(line) + } + return f.ip.filter(line) +} + +type IPLabelFilter struct { + ip *ipFilter + Ty LabelFilterType + + // if used as Label matcher, this holds the identifier Label name. + // e.g: (|remote_addr = ip("xxx")). Here labelName is `remote_addr` + Label string + + // patError records if given pattern is invalid. + patError error + + // local copy of Pattern to display it in errors, even though Pattern matcher fails because of invalid Pattern. + Pattern string +} + +// NewIPLabelFilter is used to construct ip filter as label filter for the given `label`. +func NewIPLabelFilter(pattern, label string, ty LabelFilterType) *IPLabelFilter { + ip, err := newIPFilter(pattern) + return &IPLabelFilter{ + ip: ip, + Label: label, + Ty: ty, + patError: err, + Pattern: pattern, + } +} + +// `Process` implements `Stage` interface +func (f *IPLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + return line, f.filterTy(line, f.Ty, lbs) +} + +func (f *IPLabelFilter) isLabelFilterer() {} + +// `RequiredLabelNames` implements `Stage` interface +func (f *IPLabelFilter) RequiredLabelNames() []string { + return []string{f.Label} +} + +// PatternError will be used `labelFilter.Stage()` method so that, if the given pattern is wrong +// it returns proper 400 error to the client of LogQL. +func (f *IPLabelFilter) PatternError() error { + return f.patError +} + +func (f *IPLabelFilter) filterTy(_ []byte, ty LabelFilterType, lbs *LabelsBuilder) bool { + if lbs.HasErr() { + // why `true`?. if there's an error only the string matchers can filter out. + return true + } + input, ok := lbs.Get(f.Label) + if !ok { + // we have not found the label. + return false + } + + if f.ip == nil { + return false + } + + switch ty { + case LabelFilterEqual: + return f.ip.filter([]byte(input)) + case LabelFilterNotEqual: + return !f.ip.filter([]byte(input)) + } + return false +} + +// `String` implements fmt.Stringer inteface, by which also implements `LabelFilterer` inteface. +func (f *IPLabelFilter) String() string { + eq := "=" // LabelFilterEqual -> "==", we don't want in string representation of ip label filter. + if f.Ty == LabelFilterNotEqual { + eq = LabelFilterNotEqual.String() + } + + return fmt.Sprintf("%s%sip(%q)", f.Label, eq, f.Pattern) // label filter +} + +// ipFilter search for IP addresses of given `pattern` in the given `line`. +// It returns true if pattern is matched with at least one IP in the `line` + +// pattern - can be of the following form for both IPv4 and IPv6. +// 1. SINGLE-IP - "192.168.0.1" +// 2. IP RANGE - "192.168.0.1-192.168.0.23" +// 3. CIDR - "192.168.0.0/16" +type ipFilter struct { + pattern string + matcher IPMatcher +} + +func newIPFilter(pattern string) (*ipFilter, error) { + filter := &ipFilter{pattern: pattern} + + matcher, err := getMatcher(pattern) + if err != nil { + return nil, err + } + filter.matcher = matcher + + return filter, nil +} + +// filter does the heavy lifting finding ip `pattern` in the givin `line`. 
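Putting the exported surface together: `NewIPLineFilter` compiles the pattern once and then scans raw lines, so `ip(...)` does no per-line parsing of the pattern. A usage sketch under the vendored-path assumption used earlier:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"

	log "github.com/grafana/loki/pkg/logql/log"
)

func main() {
	// LogQL `|= ip("192.168.0.0/16")` as a line filter.
	f, err := log.NewIPLineFilter("192.168.0.0/16", labels.MatchEqual)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Filter([]byte("client=192.168.4.5 status=500"))) // true
	fmt.Println(f.Filter([]byte("client=10.0.0.1 status=200")))    // false
}
```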
+// This is the function if you want to understand how the core logic how ip filter works! +func (f *ipFilter) filter(line []byte) bool { + if len(line) == 0 { + return false + } + + n := len(line) + + filterFn := func(line []byte, start int, charset string) (bool, int) { + iplen := bytesSpan(line[start:], []byte(charset)) + if iplen < 0 { + return false, 0 + } + ip, err := netip.ParseAddr(string(line[start : start+iplen])) + if err == nil { + if containsIP(f.matcher, ip) { + return true, 0 + } + } + return false, iplen + } + + // This loop try to extract IPv4 or IPv6 address from the arbitrary string. + // It uses IPv4 and IPv6 prefix hints to find the IP addresses faster without using regexp. + for i := 0; i < n; i++ { + if i+3 < n && ipv4Hint([4]byte{line[i], line[i+1], line[i+2], line[i+3]}) { + ok, iplen := filterFn(line, i, IPv4Charset) + if ok { + return true + } + i += iplen + continue + } + + if i+4 < n && ipv6Hint([5]byte{line[i], line[i+1], line[i+2], line[i+3], line[i+4]}) { + ok, iplen := filterFn(line, i, IPv6Charset) + if ok { + return true + } + i += iplen + continue + } + } + return false +} + +func containsIP(matcher IPMatcher, ip netip.Addr) bool { + switch m := matcher.(type) { + case netip.Addr: + return m.Compare(ip) == 0 + case netipx.IPRange: + return m.Contains(ip) + case netip.Prefix: + return m.Contains(ip) + } + return false +} + +func getMatcher(pattern string) (IPMatcher, error) { + var ( + matcher IPMatcher + err error + ) + + matcher, err = netip.ParseAddr(pattern) // is it simple single IP? + if err == nil { + return matcher, nil + } + matcher, err = netip.ParsePrefix(pattern) // is it cidr format? (192.168.0.1/16) + if err == nil { + return matcher, nil + } + + matcher, err = netipx.ParseIPRange(pattern) // is it IP range format? (192.168.0.1 - 192.168.4.5 + if err == nil { + return matcher, nil + } + + return nil, fmt.Errorf("%w: %q", ErrIPFilterInvalidPattern, pattern) +} + +func ipv4Hint(prefix [4]byte) bool { + return unicode.IsDigit(rune(prefix[0])) && (prefix[1] == '.' || prefix[2] == '.' || prefix[3] == '.') +} + +func ipv6Hint(prefix [5]byte) bool { + hint1 := prefix[0] == ':' && prefix[1] == ':' && isHexDigit(prefix[2]) + hint2 := isHexDigit(prefix[0]) && prefix[1] == ':' + hint3 := isHexDigit(prefix[0]) && isHexDigit(prefix[1]) && prefix[2] == ':' + hint4 := isHexDigit(prefix[0]) && isHexDigit(prefix[1]) && isHexDigit(prefix[2]) && prefix[3] == ':' + hint5 := isHexDigit(prefix[0]) && isHexDigit(prefix[1]) && isHexDigit(prefix[2]) && isHexDigit(prefix[3]) && prefix[4] == ':' + + return hint1 || hint2 || hint3 || hint4 || hint5 +} + +func isHexDigit(r byte) bool { + return unicode.IsDigit(rune(r)) || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') +} + +// bytesSpan is same as C's `strcspan()` function. +// It returns the number of chars in the initial segment of `s` +// which consist only of chars from `accept`. 
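`getMatcher` tries the three pattern shapes in order; each maps to a different matcher type from `net/netip` and `netipx`, all of which satisfy the empty `IPMatcher` interface:

```go
package main

import (
	"fmt"
	"net/netip"

	"go4.org/netipx"
)

func main() {
	ip := netip.MustParseAddr("192.168.0.7")

	// The three pattern shapes getMatcher recognizes:
	single := netip.MustParseAddr("192.168.0.7")       // 1. single IP
	cidr := netip.MustParsePrefix("192.168.0.0/16")    // 3. CIDR
	rng, err := netipx.ParseIPRange("192.168.0.1-192.168.0.23") // 2. range
	if err != nil {
		panic(err)
	}

	// Mirrors containsIP's type switch:
	fmt.Println(single.Compare(ip) == 0) // true
	fmt.Println(cidr.Contains(ip))       // true
	fmt.Println(rng.Contains(ip))        // true
}
```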
+func bytesSpan(s, accept []byte) int { + m := make(map[byte]bool) + + for _, r := range accept { + m[r] = true + } + + for i, r := range s { + if !m[r] { + return i + } + } + + return len(s) +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/jsonexpr/jsonexpr.y b/vendor/github.com/grafana/loki/pkg/logql/log/jsonexpr/jsonexpr.y new file mode 100644 index 00000000..cc279375 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/jsonexpr/jsonexpr.y @@ -0,0 +1,56 @@ +// Inspired by https://github.com/sjjian/yacc-examples + +%{ +package jsonexpr + +func setScannerData(lex interface{}, data []interface{}) { + lex.(*Scanner).data = data +} + +%} + +%union { + empty struct{} + str string + field string + list []interface{} + int int +} + +%token DOT LSB RSB +%token STRING +%token FIELD +%token INDEX + +%type index index_access +%type field key key_access +%type values + +%% + +json: + values { setScannerData(JSONExprlex, $1) } + +values: + field { $$ = []interface{}{$1} } + | key_access { $$ = []interface{}{$1} } + | index_access { $$ = []interface{}{$1} } + | values key_access { $$ = append($1, $2) } + | values index_access { $$ = append($1, $2) } + | values DOT field { $$ = append($1, $3) } + ; + +key_access: + LSB key RSB { $$ = $2 } + +index_access: + LSB index RSB { $$ = $2 } + +field: + FIELD { $$ = $1 } + +key: + STRING { $$ = $1 } + +index: + INDEX { $$ = $1 } \ No newline at end of file diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/jsonexpr/jsonexpr.y.go b/vendor/github.com/grafana/loki/pkg/logql/log/jsonexpr/jsonexpr.y.go new file mode 100644 index 00000000..8d21681b --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/jsonexpr/jsonexpr.y.go @@ -0,0 +1,517 @@ +// Code generated by goyacc -p JSONExpr -o pkg/logql/log/jsonexpr/jsonexpr.y.go pkg/logql/log/jsonexpr/jsonexpr.y. DO NOT EDIT. 
+ +//line pkg/logql/log/jsonexpr/jsonexpr.y:4 +package jsonexpr + +import __yyfmt__ "fmt" + +//line pkg/logql/log/jsonexpr/jsonexpr.y:4 + +func setScannerData(lex interface{}, data []interface{}) { + lex.(*Scanner).data = data +} + +//line pkg/logql/log/jsonexpr/jsonexpr.y:12 +type JSONExprSymType struct { + yys int + empty struct{} + str string + field string + list []interface{} + int int +} + +const DOT = 57346 +const LSB = 57347 +const RSB = 57348 +const STRING = 57349 +const FIELD = 57350 +const INDEX = 57351 + +var JSONExprToknames = [...]string{ + "$end", + "error", + "$unk", + "DOT", + "LSB", + "RSB", + "STRING", + "FIELD", + "INDEX", +} + +var JSONExprStatenames = [...]string{} + +const JSONExprEofCode = 1 +const JSONExprErrCode = 2 +const JSONExprInitialStackSize = 16 + +//line yacctab:1 +var JSONExprExca = [...]int{ + -1, 1, + 1, -1, + -2, 0, +} + +const JSONExprPrivate = 57344 + +const JSONExprLast = 19 + +var JSONExprAct = [...]int{ + 3, 13, 7, 14, 6, 6, 17, 16, 10, 7, + 4, 15, 5, 8, 1, 9, 2, 11, 12, +} + +var JSONExprPact = [...]int{ + -3, -1000, 4, -1000, -1000, -1000, -1000, -6, -1000, -1000, + -4, 1, 0, -1000, -1000, -1000, -1000, -1000, +} + +var JSONExprPgo = [...]int{ + 0, 18, 12, 0, 17, 10, 16, 14, +} + +var JSONExprR1 = [...]int{ + 0, 7, 6, 6, 6, 6, 6, 6, 5, 2, + 3, 4, 1, +} + +var JSONExprR2 = [...]int{ + 0, 1, 1, 1, 1, 2, 2, 3, 3, 3, + 1, 1, 1, +} + +var JSONExprChk = [...]int{ + -1000, -7, -6, -3, -5, -2, 8, 5, -5, -2, + 4, -4, -1, 7, 9, -3, 6, 6, +} + +var JSONExprDef = [...]int{ + 0, -2, 1, 2, 3, 4, 10, 0, 5, 6, + 0, 0, 0, 11, 12, 7, 8, 9, +} + +var JSONExprTok1 = [...]int{ + 1, +} + +var JSONExprTok2 = [...]int{ + 2, 3, 4, 5, 6, 7, 8, 9, +} + +var JSONExprTok3 = [...]int{ + 0, +} + +var JSONExprErrorMessages = [...]struct { + state int + token int + msg string +}{} + +//line yaccpar:1 + +/* parser for yacc output */ + +var ( + JSONExprDebug = 0 + JSONExprErrorVerbose = false +) + +type JSONExprLexer interface { + Lex(lval *JSONExprSymType) int + Error(s string) +} + +type JSONExprParser interface { + Parse(JSONExprLexer) int + Lookahead() int +} + +type JSONExprParserImpl struct { + lval JSONExprSymType + stack [JSONExprInitialStackSize]JSONExprSymType + char int +} + +func (p *JSONExprParserImpl) Lookahead() int { + return p.char +} + +func JSONExprNewParser() JSONExprParser { + return &JSONExprParserImpl{} +} + +const JSONExprFlag = -1000 + +func JSONExprTokname(c int) string { + if c >= 1 && c-1 < len(JSONExprToknames) { + if JSONExprToknames[c-1] != "" { + return JSONExprToknames[c-1] + } + } + return __yyfmt__.Sprintf("tok-%v", c) +} + +func JSONExprStatname(s int) string { + if s >= 0 && s < len(JSONExprStatenames) { + if JSONExprStatenames[s] != "" { + return JSONExprStatenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func JSONExprErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !JSONExprErrorVerbose { + return "syntax error" + } + + for _, e := range JSONExprErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + JSONExprTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. 
+ base := JSONExprPact[state] + for tok := TOKSTART; tok-1 < len(JSONExprToknames); tok++ { + if n := base + tok; n >= 0 && n < JSONExprLast && JSONExprChk[JSONExprAct[n]] == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if JSONExprDef[state] == -2 { + i := 0 + for JSONExprExca[i] != -1 || JSONExprExca[i+1] != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; JSONExprExca[i] >= 0; i += 2 { + tok := JSONExprExca[i] + if tok < TOKSTART || JSONExprExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if JSONExprExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += JSONExprTokname(tok) + } + return res +} + +func JSONExprlex1(lex JSONExprLexer, lval *JSONExprSymType) (char, token int) { + token = 0 + char = lex.Lex(lval) + if char <= 0 { + token = JSONExprTok1[0] + goto out + } + if char < len(JSONExprTok1) { + token = JSONExprTok1[char] + goto out + } + if char >= JSONExprPrivate { + if char < JSONExprPrivate+len(JSONExprTok2) { + token = JSONExprTok2[char-JSONExprPrivate] + goto out + } + } + for i := 0; i < len(JSONExprTok3); i += 2 { + token = JSONExprTok3[i+0] + if token == char { + token = JSONExprTok3[i+1] + goto out + } + } + +out: + if token == 0 { + token = JSONExprTok2[1] /* unknown char */ + } + if JSONExprDebug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", JSONExprTokname(token), uint(char)) + } + return char, token +} + +func JSONExprParse(JSONExprlex JSONExprLexer) int { + return JSONExprNewParser().Parse(JSONExprlex) +} + +func (JSONExprrcvr *JSONExprParserImpl) Parse(JSONExprlex JSONExprLexer) int { + var JSONExprn int + var JSONExprVAL JSONExprSymType + var JSONExprDollar []JSONExprSymType + _ = JSONExprDollar // silence set and not used + JSONExprS := JSONExprrcvr.stack[:] + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + JSONExprstate := 0 + JSONExprrcvr.char = -1 + JSONExprtoken := -1 // JSONExprrcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when not parsing. 
+ JSONExprstate = -1 + JSONExprrcvr.char = -1 + JSONExprtoken = -1 + }() + JSONExprp := -1 + goto JSONExprstack + +ret0: + return 0 + +ret1: + return 1 + +JSONExprstack: + /* put a state and value onto the stack */ + if JSONExprDebug >= 4 { + __yyfmt__.Printf("char %v in %v\n", JSONExprTokname(JSONExprtoken), JSONExprStatname(JSONExprstate)) + } + + JSONExprp++ + if JSONExprp >= len(JSONExprS) { + nyys := make([]JSONExprSymType, len(JSONExprS)*2) + copy(nyys, JSONExprS) + JSONExprS = nyys + } + JSONExprS[JSONExprp] = JSONExprVAL + JSONExprS[JSONExprp].yys = JSONExprstate + +JSONExprnewstate: + JSONExprn = JSONExprPact[JSONExprstate] + if JSONExprn <= JSONExprFlag { + goto JSONExprdefault /* simple state */ + } + if JSONExprrcvr.char < 0 { + JSONExprrcvr.char, JSONExprtoken = JSONExprlex1(JSONExprlex, &JSONExprrcvr.lval) + } + JSONExprn += JSONExprtoken + if JSONExprn < 0 || JSONExprn >= JSONExprLast { + goto JSONExprdefault + } + JSONExprn = JSONExprAct[JSONExprn] + if JSONExprChk[JSONExprn] == JSONExprtoken { /* valid shift */ + JSONExprrcvr.char = -1 + JSONExprtoken = -1 + JSONExprVAL = JSONExprrcvr.lval + JSONExprstate = JSONExprn + if Errflag > 0 { + Errflag-- + } + goto JSONExprstack + } + +JSONExprdefault: + /* default state action */ + JSONExprn = JSONExprDef[JSONExprstate] + if JSONExprn == -2 { + if JSONExprrcvr.char < 0 { + JSONExprrcvr.char, JSONExprtoken = JSONExprlex1(JSONExprlex, &JSONExprrcvr.lval) + } + + /* look through exception table */ + xi := 0 + for { + if JSONExprExca[xi+0] == -1 && JSONExprExca[xi+1] == JSONExprstate { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + JSONExprn = JSONExprExca[xi+0] + if JSONExprn < 0 || JSONExprn == JSONExprtoken { + break + } + } + JSONExprn = JSONExprExca[xi+1] + if JSONExprn < 0 { + goto ret0 + } + } + if JSONExprn == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + JSONExprlex.Error(JSONExprErrorMessage(JSONExprstate, JSONExprtoken)) + Nerrs++ + if JSONExprDebug >= 1 { + __yyfmt__.Printf("%s", JSONExprStatname(JSONExprstate)) + __yyfmt__.Printf(" saw %s\n", JSONExprTokname(JSONExprtoken)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for JSONExprp >= 0 { + JSONExprn = JSONExprPact[JSONExprS[JSONExprp].yys] + JSONExprErrCode + if JSONExprn >= 0 && JSONExprn < JSONExprLast { + JSONExprstate = JSONExprAct[JSONExprn] /* simulate a shift of "error" */ + if JSONExprChk[JSONExprstate] == JSONExprErrCode { + goto JSONExprstack + } + } + + /* the current p has no shift on "error", pop stack */ + if JSONExprDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", JSONExprS[JSONExprp].yys) + } + JSONExprp-- + } + /* there is no state on the stack with an error shift ... abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if JSONExprDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", JSONExprTokname(JSONExprtoken)) + } + if JSONExprtoken == JSONExprEofCode { + goto ret1 + } + JSONExprrcvr.char = -1 + JSONExprtoken = -1 + goto JSONExprnewstate /* try again in the same state */ + } + } + + /* reduction by production JSONExprn */ + if JSONExprDebug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", JSONExprn, JSONExprStatname(JSONExprstate)) + } + + JSONExprnt := JSONExprn + JSONExprpt := JSONExprp + _ = JSONExprpt // guard against "declared and not used" + + JSONExprp -= JSONExprR2[JSONExprn] + // JSONExprp is now the index of $0. 
Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. + if JSONExprp+1 >= len(JSONExprS) { + nyys := make([]JSONExprSymType, len(JSONExprS)*2) + copy(nyys, JSONExprS) + JSONExprS = nyys + } + JSONExprVAL = JSONExprS[JSONExprp+1] + + /* consult goto table to find next state */ + JSONExprn = JSONExprR1[JSONExprn] + JSONExprg := JSONExprPgo[JSONExprn] + JSONExprj := JSONExprg + JSONExprS[JSONExprp].yys + 1 + + if JSONExprj >= JSONExprLast { + JSONExprstate = JSONExprAct[JSONExprg] + } else { + JSONExprstate = JSONExprAct[JSONExprj] + if JSONExprChk[JSONExprstate] != -JSONExprn { + JSONExprstate = JSONExprAct[JSONExprg] + } + } + // dummy call; replaced with literal code + switch JSONExprnt { + + case 1: + JSONExprDollar = JSONExprS[JSONExprpt-1 : JSONExprpt+1] +//line pkg/logql/log/jsonexpr/jsonexpr.y:32 + { + setScannerData(JSONExprlex, JSONExprDollar[1].list) + } + case 2: + JSONExprDollar = JSONExprS[JSONExprpt-1 : JSONExprpt+1] +//line pkg/logql/log/jsonexpr/jsonexpr.y:35 + { + JSONExprVAL.list = []interface{}{JSONExprDollar[1].str} + } + case 3: + JSONExprDollar = JSONExprS[JSONExprpt-1 : JSONExprpt+1] +//line pkg/logql/log/jsonexpr/jsonexpr.y:36 + { + JSONExprVAL.list = []interface{}{JSONExprDollar[1].str} + } + case 4: + JSONExprDollar = JSONExprS[JSONExprpt-1 : JSONExprpt+1] +//line pkg/logql/log/jsonexpr/jsonexpr.y:37 + { + JSONExprVAL.list = []interface{}{JSONExprDollar[1].int} + } + case 5: + JSONExprDollar = JSONExprS[JSONExprpt-2 : JSONExprpt+1] +//line pkg/logql/log/jsonexpr/jsonexpr.y:38 + { + JSONExprVAL.list = append(JSONExprDollar[1].list, JSONExprDollar[2].str) + } + case 6: + JSONExprDollar = JSONExprS[JSONExprpt-2 : JSONExprpt+1] +//line pkg/logql/log/jsonexpr/jsonexpr.y:39 + { + JSONExprVAL.list = append(JSONExprDollar[1].list, JSONExprDollar[2].int) + } + case 7: + JSONExprDollar = JSONExprS[JSONExprpt-3 : JSONExprpt+1] +//line pkg/logql/log/jsonexpr/jsonexpr.y:40 + { + JSONExprVAL.list = append(JSONExprDollar[1].list, JSONExprDollar[3].str) + } + case 8: + JSONExprDollar = JSONExprS[JSONExprpt-3 : JSONExprpt+1] +//line pkg/logql/log/jsonexpr/jsonexpr.y:44 + { + JSONExprVAL.str = JSONExprDollar[2].str + } + case 9: + JSONExprDollar = JSONExprS[JSONExprpt-3 : JSONExprpt+1] +//line pkg/logql/log/jsonexpr/jsonexpr.y:47 + { + JSONExprVAL.int = JSONExprDollar[2].int + } + case 10: + JSONExprDollar = JSONExprS[JSONExprpt-1 : JSONExprpt+1] +//line pkg/logql/log/jsonexpr/jsonexpr.y:50 + { + JSONExprVAL.str = JSONExprDollar[1].field + } + case 11: + JSONExprDollar = JSONExprS[JSONExprpt-1 : JSONExprpt+1] +//line pkg/logql/log/jsonexpr/jsonexpr.y:53 + { + JSONExprVAL.str = JSONExprDollar[1].str + } + case 12: + JSONExprDollar = JSONExprS[JSONExprpt-1 : JSONExprpt+1] +//line pkg/logql/log/jsonexpr/jsonexpr.y:56 + { + JSONExprVAL.int = JSONExprDollar[1].int + } + } + goto JSONExprstack /* stack new state and value */ +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/jsonexpr/lexer.go b/vendor/github.com/grafana/loki/pkg/logql/log/jsonexpr/lexer.go new file mode 100644 index 00000000..f3ba6dcd --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/jsonexpr/lexer.go @@ -0,0 +1,172 @@ +package jsonexpr + +import ( + "bufio" + "fmt" + "io" + "strconv" + "text/scanner" +) + +type Scanner struct { + buf *bufio.Reader + data []interface{} + err error + debug bool +} + +func NewScanner(r io.Reader, debug bool) *Scanner { + return &Scanner{ + buf: bufio.NewReader(r), + debug: debug, + } +} + +func (sc *Scanner) Error(s string) { 
+ sc.err = fmt.Errorf(s) + fmt.Printf("syntax error: %s\n", s) +} + +func (sc *Scanner) Reduced(rule, state int, lval *JSONExprSymType) bool { + if sc.debug { + fmt.Printf("rule: %v; state %v; lval: %v\n", rule, state, lval) + } + return false +} + +func (sc *Scanner) Lex(lval *JSONExprSymType) int { + return sc.lex(lval) +} + +func (sc *Scanner) lex(lval *JSONExprSymType) int { + for { + r := sc.read() + + if r == 0 { + return 0 + } + if isWhitespace(r) { + continue + } + + if isDigit(r) { + sc.unread() + val, err := sc.scanInt() + if err != nil { + sc.err = fmt.Errorf(err.Error()) + return 0 + } + + lval.int = val + return INDEX + } + + switch true { + case r == '[': + return LSB + case r == ']': + return RSB + case r == '.': + return DOT + case isStartIdentifier(r): + sc.unread() + lval.field = sc.scanField() + return FIELD + case r == '"': + sc.unread() + lval.str = sc.scanStr() + return STRING + default: + sc.err = fmt.Errorf("unexpected char %c", r) + return 0 + } + } +} + +func isStartIdentifier(r rune) bool { + return (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || r == '_' +} + +func isIdentifier(r rune) bool { + return isStartIdentifier(r) || (r >= '0' && r <= '9') +} + +func (sc *Scanner) scanField() string { + var str []rune + + for { + r := sc.read() + if !isIdentifier(r) || isEndOfInput(r) { + sc.unread() + break + } + + str = append(str, r) + } + return string(str) +} + +func (sc *Scanner) scanStr() string { + var str []rune + //begin with ", end with " + r := sc.read() + if r != '"' { + sc.err = fmt.Errorf("unexpected char %c", r) + return "" + } + + for { + r := sc.read() + if isEndOfInput(r) { + break + } + + if r == '"' || r == ']' { + break + } + str = append(str, r) + } + return string(str) +} + +func (sc *Scanner) scanInt() (int, error) { + var number []rune + + for { + r := sc.read() + if r == '.' && len(number) > 0 { + return 0, fmt.Errorf("cannot use float as array index") + } + + if isWhitespace(r) || r == '.' 
|| r == ']' { + sc.unread() + break + } + + if !isDigit(r) { + return 0, fmt.Errorf("non-integer value: %c", r) + } + + number = append(number, r) + } + + return strconv.Atoi(string(number)) +} + +// input is either terminated by EOF or null byte +func isEndOfInput(r rune) bool { + return r == scanner.EOF || r == rune(0) +} + +func (sc *Scanner) read() rune { + ch, _, _ := sc.buf.ReadRune() + return ch +} + +func (sc *Scanner) unread() { _ = sc.buf.UnreadRune() } + +func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' } + +func isDigit(r rune) bool { + return r >= '0' && r <= '9' +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/jsonexpr/parser.go b/vendor/github.com/grafana/loki/pkg/logql/log/jsonexpr/parser.go new file mode 100644 index 00000000..9d14004d --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/jsonexpr/parser.go @@ -0,0 +1,19 @@ +package jsonexpr + +import ( + "strings" +) + +func init() { + JSONExprErrorVerbose = true +} + +func Parse(expr string, debug bool) ([]interface{}, error) { + s := NewScanner(strings.NewReader(expr), debug) + JSONExprParse(s) + + if s.err != nil { + return nil, s.err + } + return s.data, nil +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/keep_labels.go b/vendor/github.com/grafana/loki/pkg/logql/log/keep_labels.go new file mode 100644 index 00000000..43ed2ab6 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/keep_labels.go @@ -0,0 +1,72 @@ +package log + +import ( + "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/loki/pkg/logqlmodel" +) + +type KeepLabels struct { + keepLabels []KeepLabel +} + +type KeepLabel struct { + Matcher *labels.Matcher + Name string +} + +func NewKeepLabel(matcher *labels.Matcher, name string) KeepLabel { + return KeepLabel{ + Matcher: matcher, + Name: name, + } +} + +func NewKeepLabels(kl []KeepLabel) *KeepLabels { + return &KeepLabels{keepLabels: kl} +} + +func (kl *KeepLabels) Process(_ int64, line []byte, lbls *LabelsBuilder) ([]byte, bool) { + if len(kl.keepLabels) == 0 { + return line, true + } + + // TODO: Reuse buf? 
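End-to-end, the goyacc grammar and hand-written lexer above expose a single `Parse` helper that turns a JSON path expression into a list of string keys and integer indexes (assuming the vendored import path resolves in a consumer module):

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/log/jsonexpr"
)

func main() {
	// Mixes all three access forms: field, [index], ["quoted key"].
	path, err := jsonexpr.Parse(`servers[0]["ip address"].port`, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // [servers 0 ip address port]
}
```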
+	for _, lb := range lbls.UnsortedLabels(nil) {
+		if isSpecialLabel(lb.Name) {
+			continue
+		}
+
+		var keep bool
+		for _, keepLabel := range kl.keepLabels {
+			if keepLabel.Matcher != nil && keepLabel.Matcher.Name == lb.Name && keepLabel.Matcher.Matches(lb.Value) {
+				keep = true
+				break
+			}
+
+			if keepLabel.Name == lb.Name {
+				keep = true
+				break
+			}
+		}
+
+		if !keep {
+			lbls.Del(lb.Name)
+		}
+	}
+
+	return line, true
+}
+
+func (kl *KeepLabels) RequiredLabelNames() []string {
+	return []string{}
+}
+
+func isSpecialLabel(lblName string) bool {
+	switch lblName {
+	case logqlmodel.ErrorLabel, logqlmodel.ErrorDetailsLabel, logqlmodel.PreserveErrorLabel:
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/label_extraction_expr.go b/vendor/github.com/grafana/loki/pkg/logql/log/label_extraction_expr.go
new file mode 100644
index 00000000..79a0ab71
--- /dev/null
+++ b/vendor/github.com/grafana/loki/pkg/logql/log/label_extraction_expr.go
@@ -0,0 +1,13 @@
+package log
+
+type LabelExtractionExpr struct {
+	Identifier string
+	Expression string
+}
+
+func NewLabelExtractionExpr(identifier, expression string) LabelExtractionExpr {
+	return LabelExtractionExpr{
+		Identifier: identifier,
+		Expression: expression,
+	}
+}
diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/label_filter.go b/vendor/github.com/grafana/loki/pkg/logql/log/label_filter.go
new file mode 100644
index 00000000..e3bb1a4b
--- /dev/null
+++ b/vendor/github.com/grafana/loki/pkg/logql/log/label_filter.go
@@ -0,0 +1,416 @@
+package log
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+
+	"github.com/dustin/go-humanize"
+	"github.com/prometheus/prometheus/model/labels"
+
+	"github.com/grafana/loki/pkg/logqlmodel"
+)
+
+var (
+	_ LabelFilterer = &BinaryLabelFilter{}
+	_ LabelFilterer = &BytesLabelFilter{}
+	_ LabelFilterer = &DurationLabelFilter{}
+	_ LabelFilterer = &NumericLabelFilter{}
+	_ LabelFilterer = &StringLabelFilter{}
+)
+
+// LabelFilterType is an enum for label filtering types.
+type LabelFilterType int
+
+// Possible LabelFilterType.
+const (
+	LabelFilterEqual LabelFilterType = iota
+	LabelFilterNotEqual
+	LabelFilterGreaterThan
+	LabelFilterGreaterThanOrEqual
+	LabelFilterLesserThan
+	LabelFilterLesserThanOrEqual
+)
+
+func (f LabelFilterType) String() string {
+	switch f {
+	case LabelFilterEqual:
+		return "=="
+	case LabelFilterNotEqual:
+		return "!="
+	case LabelFilterGreaterThan:
+		return ">"
+	case LabelFilterGreaterThanOrEqual:
+		return ">="
+	case LabelFilterLesserThan:
+		return "<"
+	case LabelFilterLesserThanOrEqual:
+		return "<="
+	default:
+		return ""
+	}
+}
+
+// LabelFilterer can filter extracted labels.
+//
+//sumtype:decl
+type LabelFilterer interface {
+	Stage
+	fmt.Stringer
+
+	// Seal trait
+	isLabelFilterer()
+}
+
+type BinaryLabelFilter struct {
+	Left  LabelFilterer
+	Right LabelFilterer
+	And   bool
+}
+
+// NewAndLabelFilter creates a new LabelFilterer from an AND binary operation of two LabelFilterers.
+func NewAndLabelFilter(left LabelFilterer, right LabelFilterer) *BinaryLabelFilter {
+	return &BinaryLabelFilter{
+		Left:  left,
+		Right: right,
+		And:   true,
+	}
+}
+
+// NewOrLabelFilter creates a new LabelFilterer from an OR binary operation of two LabelFilterers.
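
The `BinaryLabelFilter` constructors here compose the `and`/`or` label filter expressions of LogQL. A hypothetical sketch of how they combine, using only constructors visible in this hunk; the printed form in the comment follows the `String()` methods defined in this file and is an assumption:

package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/log"
	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// `| level="error" and status >= 500` becomes an AND of two label filters.
	level := log.NewStringLabelFilter(labels.MustNewMatcher(labels.MatchEqual, "level", "error"))
	status := log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, "status", 500)

	filter := log.NewAndLabelFilter(level, status)
	fmt.Println(filter) // ( level="error" , status>=500 )
}
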
+func NewOrLabelFilter(left LabelFilterer, right LabelFilterer) *BinaryLabelFilter { + return &BinaryLabelFilter{ + Left: left, + Right: right, + } +} + +func (b *BinaryLabelFilter) Process(ts int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + line, lok := b.Left.Process(ts, line, lbs) + if !b.And && lok { + return line, true + } + line, rok := b.Right.Process(ts, line, lbs) + if !b.And { + return line, lok || rok + } + return line, lok && rok +} + +func (b *BinaryLabelFilter) isLabelFilterer() {} + +func (b *BinaryLabelFilter) RequiredLabelNames() []string { + var names []string + names = append(names, b.Left.RequiredLabelNames()...) + names = append(names, b.Right.RequiredLabelNames()...) + return uniqueString(names) +} + +func (b *BinaryLabelFilter) String() string { + var sb strings.Builder + sb.WriteString("( ") + sb.WriteString(b.Left.String()) + if b.And { + sb.WriteString(" , ") + } else { + sb.WriteString(" or ") + } + sb.WriteString(b.Right.String()) + sb.WriteString(" )") + return sb.String() +} + +type NoopLabelFilter struct { + *labels.Matcher +} + +func (NoopLabelFilter) Process(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) { + return line, true +} + +func (NoopLabelFilter) isLabelFilterer() {} + +func (NoopLabelFilter) RequiredLabelNames() []string { return []string{} } + +func (f NoopLabelFilter) String() string { + if f.Matcher != nil { + return f.Matcher.String() + } + return "" +} + +// ReduceAndLabelFilter Reduces multiple label filterer into one using binary and operation. +func ReduceAndLabelFilter(filters []LabelFilterer) LabelFilterer { + if len(filters) == 0 { + return &NoopLabelFilter{} + } + if len(filters) == 1 { + return filters[0] + } + result := filters[0] + for _, f := range filters[1:] { + result = NewAndLabelFilter(result, f) + } + return result +} + +type BytesLabelFilter struct { + Name string + Value uint64 + Type LabelFilterType +} + +// NewBytesLabelFilter creates a new label filterer which parses bytes string representation (1KB) from the value of the named label +// and compares it with the given b value. +func NewBytesLabelFilter(t LabelFilterType, name string, b uint64) *BytesLabelFilter { + return &BytesLabelFilter{ + Name: name, + Type: t, + Value: b, + } +} + +func (d *BytesLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + if lbs.HasErr() { + // if there's an error only the string matchers can filter it out. + return line, true + } + v, ok := lbs.Get(d.Name) + if !ok { + // we have not found this label. 
+ return line, false + } + value, err := humanize.ParseBytes(v) + if err != nil { + lbs.SetErr(errLabelFilter) + lbs.SetErrorDetails(err.Error()) + return line, true + } + switch d.Type { + case LabelFilterEqual: + return line, value == d.Value + case LabelFilterNotEqual: + return line, value != d.Value + case LabelFilterGreaterThan: + return line, value > d.Value + case LabelFilterGreaterThanOrEqual: + return line, value >= d.Value + case LabelFilterLesserThan: + return line, value < d.Value + case LabelFilterLesserThanOrEqual: + return line, value <= d.Value + default: + lbs.SetErr(errLabelFilter) + return line, true + } +} + +func (d *BytesLabelFilter) isLabelFilterer() {} + +func (d *BytesLabelFilter) RequiredLabelNames() []string { + return []string{d.Name} +} + +func (d *BytesLabelFilter) String() string { + b := strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 + } + return r + }, humanize.Bytes(d.Value)) // TODO: discuss whether this should just be bytes, B, to be more accurate. + return fmt.Sprintf("%s%s%s", d.Name, d.Type, b) +} + +type DurationLabelFilter struct { + Name string + Value time.Duration + Type LabelFilterType +} + +// NewDurationLabelFilter creates a new label filterer which parses duration string representation (5s) +// from the value of the named label and compares it with the given d value. +func NewDurationLabelFilter(t LabelFilterType, name string, d time.Duration) *DurationLabelFilter { + return &DurationLabelFilter{ + Name: name, + Type: t, + Value: d, + } +} + +func (d *DurationLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + if lbs.HasErr() { + // if there's an error only the string matchers can filter out. + return line, true + } + v, ok := lbs.Get(d.Name) + if !ok { + // we have not found this label. + return line, false + } + value, err := time.ParseDuration(v) + if err != nil { + lbs.SetErr(errLabelFilter) + lbs.SetErrorDetails(err.Error()) + return line, true + } + switch d.Type { + case LabelFilterEqual: + return line, value == d.Value + case LabelFilterNotEqual: + return line, value != d.Value + case LabelFilterGreaterThan: + return line, value > d.Value + case LabelFilterGreaterThanOrEqual: + return line, value >= d.Value + case LabelFilterLesserThan: + return line, value < d.Value + case LabelFilterLesserThanOrEqual: + return line, value <= d.Value + default: + lbs.SetErr(errLabelFilter) + return line, true + } +} + +func (d *DurationLabelFilter) isLabelFilterer() {} + +func (d *DurationLabelFilter) RequiredLabelNames() []string { + return []string{d.Name} +} + +func (d *DurationLabelFilter) String() string { + return fmt.Sprintf("%s%s%s", d.Name, d.Type, d.Value) +} + +type NumericLabelFilter struct { + Name string + Value float64 + Type LabelFilterType + err error +} + +// NewNumericLabelFilter creates a new label filterer which parses float64 string representation (5.2) +// from the value of the named label and compares it with the given f value. +func NewNumericLabelFilter(t LabelFilterType, name string, v float64) *NumericLabelFilter { + return &NumericLabelFilter{ + Name: name, + Type: t, + Value: v, + } +} + +func (n *NumericLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + if lbs.HasErr() { + // if there's an error only the string matchers can filter out. + return line, true + } + v, ok := lbs.Get(n.Name) + if !ok { + // we have not found this label. 
+ return line, false + } + value, err := strconv.ParseFloat(v, 64) + if err != nil { + lbs.SetErr(errLabelFilter) + lbs.SetErrorDetails(err.Error()) + return line, true + } + switch n.Type { + case LabelFilterEqual: + return line, value == n.Value + case LabelFilterNotEqual: + return line, value != n.Value + case LabelFilterGreaterThan: + return line, value > n.Value + case LabelFilterGreaterThanOrEqual: + return line, value >= n.Value + case LabelFilterLesserThan: + return line, value < n.Value + case LabelFilterLesserThanOrEqual: + return line, value <= n.Value + default: + lbs.SetErr(errLabelFilter) + return line, true + } + +} + +func (n *NumericLabelFilter) isLabelFilterer() {} + +func (n *NumericLabelFilter) RequiredLabelNames() []string { + return []string{n.Name} +} + +func (n *NumericLabelFilter) String() string { + return fmt.Sprintf("%s%s%s", n.Name, n.Type, strconv.FormatFloat(n.Value, 'f', -1, 64)) +} + +type StringLabelFilter struct { + *labels.Matcher +} + +// NewStringLabelFilter creates a new label filterer which compares string label. +// This is the only LabelFilterer that can filter out the __error__ label. +// Unlike other LabelFilterer which apply conversion, if the label name doesn't exist it is compared with an empty value. +func NewStringLabelFilter(m *labels.Matcher) LabelFilterer { + f, err := NewLabelFilter(m.Value, m.Type) + if err != nil { + return &StringLabelFilter{Matcher: m} + } + + if f == TrueFilter { + return &NoopLabelFilter{m} + } + + return &LineFilterLabelFilter{ + Matcher: m, + filter: f, + } +} + +func (s *StringLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + return line, s.Matches(labelValue(s.Name, lbs)) +} + +func (s *StringLabelFilter) isLabelFilterer() {} + +func (s *StringLabelFilter) RequiredLabelNames() []string { + return []string{s.Name} +} + +// LineFilterLabelFilter filters the desired label using an optimized line filter +type LineFilterLabelFilter struct { + *labels.Matcher + filter Filterer +} + +// overrides the matcher.String() function in case there is a regexpFilter +func (s *LineFilterLabelFilter) String() string { + if unwrappedFilter, ok := s.filter.(regexpFilter); ok { + rStr := unwrappedFilter.String() + str := fmt.Sprintf("%s%s`%s`", s.Matcher.Name, s.Matcher.Type, rStr) + return str + } + return s.Matcher.String() +} + +func (s *LineFilterLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + v := labelValue(s.Name, lbs) + return line, s.filter.Filter(unsafeGetBytes(v)) +} + +func (s *LineFilterLabelFilter) isLabelFilterer() {} + +func (s *LineFilterLabelFilter) RequiredLabelNames() []string { + return []string{s.Name} +} + +func labelValue(name string, lbs *LabelsBuilder) string { + if name == logqlmodel.ErrorLabel { + return lbs.GetErr() + } + v, _ := lbs.Get(name) + return v +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/labels.go b/vendor/github.com/grafana/loki/pkg/logql/log/labels.go new file mode 100644 index 00000000..7bc313c8 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/labels.go @@ -0,0 +1,650 @@ +package log + +import ( + "fmt" + "sort" + + "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/loki/pkg/logqlmodel" +) + +const MaxInternedStrings = 1024 + +var EmptyLabelsResult = NewLabelsResult(labels.EmptyLabels().String(), labels.EmptyLabels().Hash(), labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels()) + +// LabelsResult is a computed labels result that contains the labels set with 
associated string and hash.
+// It is mainly used for caching and returning labels computations out of pipelines and stages.
+type LabelsResult interface {
+	String() string
+	Labels() labels.Labels
+	Stream() labels.Labels
+	StructuredMetadata() labels.Labels
+	Parsed() labels.Labels
+	Hash() uint64
+}
+
+// NewLabelsResult creates a new LabelsResult.
+// It takes the string representation of the labels, the hash of the labels and the labels categorized.
+func NewLabelsResult(allLabelsStr string, hash uint64, stream, structuredMetadata, parsed labels.Labels) LabelsResult {
+	return &labelsResult{
+		s:                  allLabelsStr,
+		h:                  hash,
+		stream:             stream,
+		structuredMetadata: structuredMetadata,
+		parsed:             parsed,
+	}
+}
+
+type labelsResult struct {
+	s string
+	h uint64
+
+	stream             labels.Labels
+	structuredMetadata labels.Labels
+	parsed             labels.Labels
+}
+
+func (l labelsResult) String() string {
+	return l.s
+}
+
+func (l labelsResult) Labels() labels.Labels {
+	return flattenLabels(nil, l.stream, l.structuredMetadata, l.parsed)
+}
+
+func (l labelsResult) Hash() uint64 {
+	return l.h
+}
+
+func (l labelsResult) Stream() labels.Labels {
+	if len(l.stream) == 0 {
+		return nil
+	}
+	return l.stream
+}
+
+func (l labelsResult) StructuredMetadata() labels.Labels {
+	if len(l.structuredMetadata) == 0 {
+		return nil
+	}
+	return l.structuredMetadata
+}
+
+func (l labelsResult) Parsed() labels.Labels {
+	if len(l.parsed) == 0 {
+		return nil
+	}
+	return l.parsed
+}
+
+type hasher struct {
+	buf []byte // buffer for computing hash without bytes slice allocation.
+}
+
+// newHasher allows computing hashes for labels by reusing the same buffer.
+func newHasher() *hasher {
+	return &hasher{
+		buf: make([]byte, 0, 1024),
+	}
+}
+
+// Hash hashes the labels.
+func (h *hasher) Hash(lbs labels.Labels) uint64 {
+	var hash uint64
+	hash, h.buf = lbs.HashWithoutLabels(h.buf, []string(nil)...)
+	return hash
+}
+
+type LabelCategory int
+
+const (
+	StreamLabel LabelCategory = iota
+	StructuredMetadataLabel
+	ParsedLabel
+	InvalidCategory
+
+	numValidCategories = 3
+)
+
+var allCategories = []LabelCategory{
+	StreamLabel,
+	StructuredMetadataLabel,
+	ParsedLabel,
+}
+
+func categoriesContain(categories []LabelCategory, category LabelCategory) bool {
+	for _, c := range categories {
+		if c == category {
+			return true
+		}
+	}
+	return false
+}
+
+// BaseLabelsBuilder is a label builder used by pipeline and stages.
+// Only one base builder is used, and it holds the result cache shared by all LabelsBuilders.
+type BaseLabelsBuilder struct {
+	del []string
+	add [numValidCategories]labels.Labels
+	// nolint:structcheck
+	// https://github.com/golangci/golangci-lint/issues/826
+	err string
+	// nolint:structcheck
+	errDetails string
+
+	groups            []string
+	parserKeyHints    ParserHint // label key hints for metric queries that allow limiting parser extractions to only this list of labels.
+	without, noLabels bool
+
+	resultCache map[uint64]LabelsResult
+	*hasher
+}
+
+// LabelsBuilder is the same as labels.Builder but tailored for this package.
+type LabelsBuilder struct {
+	base          labels.Labels
+	baseMap       map[string]string
+	buf           labels.Labels
+	currentResult LabelsResult
+	groupedResult LabelsResult
+
+	*BaseLabelsBuilder
+}
+
+// NewBaseLabelsBuilderWithGrouping creates a new base labels builder with grouping to compute results.
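
The category constants above keep stream, structured-metadata, and parsed labels separate while a line moves through the pipeline; they are merged into one sorted set only when a result is materialized. A standalone sketch of that merge step, mirroring the `flattenLabels` helper defined later in this file:

package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/prometheus/model/labels"
)

// flatten mirrors flattenLabels below: concatenate the per-category
// label sets, then sort once into the final result set.
func flatten(many ...labels.Labels) labels.Labels {
	var out labels.Labels
	for _, lbls := range many {
		out = append(out, lbls...)
	}
	sort.Sort(out)
	return out
}

func main() {
	stream := labels.FromStrings("app", "loki")
	metadata := labels.FromStrings("traceID", "abc123")
	parsed := labels.FromStrings("level", "error")
	fmt.Println(flatten(stream, metadata, parsed))
	// {app="loki", level="error", traceID="abc123"}
}
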
+func NewBaseLabelsBuilderWithGrouping(groups []string, parserKeyHints ParserHint, without, noLabels bool) *BaseLabelsBuilder { + if parserKeyHints == nil { + parserKeyHints = noParserHints + } + + const labelsCapacity = 16 + return &BaseLabelsBuilder{ + del: make([]string, 0, 5), + add: [numValidCategories]labels.Labels{ + StreamLabel: make(labels.Labels, 0, labelsCapacity), + StructuredMetadataLabel: make(labels.Labels, 0, labelsCapacity), + ParsedLabel: make(labels.Labels, 0, labelsCapacity), + }, + resultCache: make(map[uint64]LabelsResult), + hasher: newHasher(), + groups: groups, + parserKeyHints: parserKeyHints, + noLabels: noLabels, + without: without, + } +} + +// NewBaseLabelsBuilder creates a new base labels builder. +func NewBaseLabelsBuilder() *BaseLabelsBuilder { + return NewBaseLabelsBuilderWithGrouping(nil, noParserHints, false, false) +} + +// ForLabels creates a labels builder for a given labels set as base. +// The labels cache is shared across all created LabelsBuilders. +func (b *BaseLabelsBuilder) ForLabels(lbs labels.Labels, hash uint64) *LabelsBuilder { + if labelResult, ok := b.resultCache[hash]; ok { + res := &LabelsBuilder{ + base: lbs, + currentResult: labelResult, + BaseLabelsBuilder: b, + } + return res + } + labelResult := NewLabelsResult(lbs.String(), hash, lbs, labels.EmptyLabels(), labels.EmptyLabels()) + b.resultCache[hash] = labelResult + res := &LabelsBuilder{ + base: lbs, + currentResult: labelResult, + BaseLabelsBuilder: b, + } + return res +} + +// Reset clears all current state for the builder. +func (b *BaseLabelsBuilder) Reset() { + b.del = b.del[:0] + for k := range b.add { + b.add[k] = b.add[k][:0] + } + b.err = "" + b.errDetails = "" + b.parserKeyHints.Reset() +} + +// ParserLabelHints returns a limited list of expected labels to extract for metric queries. +// Returns nil when it's impossible to hint labels extractions. +func (b *BaseLabelsBuilder) ParserLabelHints() ParserHint { + return b.parserKeyHints +} + +func (b *BaseLabelsBuilder) hasDel() bool { + return len(b.del) > 0 +} + +func (b *BaseLabelsBuilder) hasAdd() bool { + for _, lbls := range b.add { + if len(lbls) > 0 { + return true + } + } + return false +} + +func (b *BaseLabelsBuilder) sizeAdd() int { + var length int + for _, lbls := range b.add { + length += len(lbls) + } + return length +} + +// SetErr sets the error label. +func (b *LabelsBuilder) SetErr(err string) *LabelsBuilder { + b.err = err + return b +} + +// GetErr return the current error label value. +func (b *LabelsBuilder) GetErr() string { + return b.err +} + +// HasErr tells if the error label has been set. +func (b *LabelsBuilder) HasErr() bool { + return b.err != "" +} + +func (b *LabelsBuilder) SetErrorDetails(desc string) *LabelsBuilder { + b.errDetails = desc + return b +} + +func (b *LabelsBuilder) ResetError() *LabelsBuilder { + b.err = "" + return b +} + +func (b *LabelsBuilder) ResetErrorDetails() *LabelsBuilder { + b.errDetails = "" + return b +} + +func (b *LabelsBuilder) GetErrorDetails() string { + return b.errDetails +} + +func (b *LabelsBuilder) HasErrorDetails() bool { + return b.errDetails != "" +} + +// BaseHas returns the base labels have the given key +func (b *LabelsBuilder) BaseHas(key string) bool { + return b.base.Has(key) +} + +// GetWithCategory returns the value and the category of a labels key if it exists. 
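
Note how the `SetErr`/`SetErrorDetails` methods just defined are meant to be used: a failing stage records the error rather than dropping the line, and the error then surfaces as labels on the result. A hedged usage sketch; the error string and the exact output rendering are assumptions based on the `logqlmodel` label-name constants and the sorted-label formatting in this package:

package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/log"
	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	base := log.NewBaseLabelsBuilder()
	lbs := labels.FromStrings("app", "loki")
	b := base.ForLabels(lbs, lbs.Hash())

	// A failing stage records the error instead of dropping the line; the
	// error surfaces as the __error__ / __error_details__ labels.
	b.SetErr("JSONParserErr").SetErrorDetails("unexpected end of input")
	fmt.Println(b.LabelsResult().String())
	// {__error__="JSONParserErr", __error_details__="unexpected end of input", app="loki"}
}
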
+func (b *LabelsBuilder) GetWithCategory(key string) (string, LabelCategory, bool) { + for category, lbls := range b.add { + for _, l := range lbls { + if l.Name == key { + return l.Value, LabelCategory(category), true + } + } + } + for _, d := range b.del { + if d == key { + return "", InvalidCategory, false + } + } + + for _, l := range b.base { + if l.Name == key { + return l.Value, StreamLabel, true + } + } + return "", InvalidCategory, false +} + +func (b *LabelsBuilder) Get(key string) (string, bool) { + v, _, ok := b.GetWithCategory(key) + return v, ok +} + +// Del deletes the label of the given name. +func (b *LabelsBuilder) Del(ns ...string) *LabelsBuilder { + for _, n := range ns { + for category, lbls := range b.add { + for i, a := range lbls { + if a.Name == n { + b.add[category] = append(lbls[:i], lbls[i+1:]...) + } + } + } + b.del = append(b.del, n) + } + return b +} + +// Set the name/value pair as a label. +func (b *LabelsBuilder) Set(category LabelCategory, n, v string) *LabelsBuilder { + for i, a := range b.add[category] { + if a.Name == n { + b.add[category][i].Value = v + return b + } + } + b.add[category] = append(b.add[category], labels.Label{Name: n, Value: v}) + + // Sometimes labels are set and later modified. Only record + // each label once + b.parserKeyHints.RecordExtracted(n) + return b +} + +// Add the labels to the builder. If a label with the same name +// already exists in the base labels, a suffix is added to the name. +func (b *LabelsBuilder) Add(category LabelCategory, labels ...labels.Label) *LabelsBuilder { + for _, l := range labels { + name := l.Name + if b.BaseHas(name) { + name = fmt.Sprintf("%s%s", name, duplicateSuffix) + } + b.Set(category, name, l.Value) + } + return b +} + +// Labels returns the labels from the builder. If no modifications +// were made, the original labels are returned. +func (b *LabelsBuilder) labels(categories ...LabelCategory) labels.Labels { + b.buf = b.UnsortedLabels(b.buf, categories...) + sort.Sort(b.buf) + return b.buf +} + +func (b *LabelsBuilder) appendErrors(buf labels.Labels) labels.Labels { + if b.err != "" { + buf = append(buf, labels.Label{ + Name: logqlmodel.ErrorLabel, + Value: b.err, + }) + } + if b.errDetails != "" { + buf = append(buf, labels.Label{ + Name: logqlmodel.ErrorDetailsLabel, + Value: b.errDetails, + }) + } + return buf +} + +func (b *LabelsBuilder) UnsortedLabels(buf labels.Labels, categories ...LabelCategory) labels.Labels { + if categories == nil { + categories = allCategories + } + + if !b.hasDel() && !b.hasAdd() && categoriesContain(categories, StreamLabel) { + if buf == nil { + buf = make(labels.Labels, 0, len(b.base)+1) // +1 for error label. + } else { + buf = buf[:0] + } + buf = append(buf, b.base...) + if categoriesContain(categories, ParsedLabel) { + buf = b.appendErrors(buf) + } + + return buf + } + + // In the general case, labels are removed, modified or moved + // rather than added. + if buf == nil { + size := len(b.base) + b.sizeAdd() + 1 + buf = make(labels.Labels, 0, size) + } else { + buf = buf[:0] + } + if categoriesContain(categories, StreamLabel) { + Outer: + for _, l := range b.base { + // Skip stream labels to be deleted + for _, n := range b.del { + if l.Name == n { + continue Outer + } + } + // Skip stream labels which value will be replaced + for _, lbls := range b.add { + for _, la := range lbls { + if l.Name == la.Name { + continue Outer + } + } + } + buf = append(buf, l) + } + } + + for _, category := range categories { + buf = append(buf, b.add[category]...) 
+ } + if (b.HasErr() || b.HasErrorDetails()) && categoriesContain(categories, ParsedLabel) { + buf = b.appendErrors(buf) + } + + return buf +} + +func (b *LabelsBuilder) Map() map[string]string { + if !b.hasDel() && !b.hasAdd() && !b.HasErr() { + if b.baseMap == nil { + b.baseMap = b.base.Map() + } + return b.baseMap + } + b.buf = b.UnsortedLabels(b.buf) + // todo should we also cache maps since limited by the result ? + // Maps also don't create a copy of the labels. + res := make(map[string]string, len(b.buf)) + for _, l := range b.buf { + res[l.Name] = l.Value + } + return res +} + +// LabelsResult returns the LabelsResult from the builder. +// No grouping is applied and the cache is used when possible. +func (b *LabelsBuilder) LabelsResult() LabelsResult { + // unchanged path. + if !b.hasDel() && !b.hasAdd() && !b.HasErr() { + return b.currentResult + } + + stream := b.labels(StreamLabel).Copy() + structuredMetadata := b.labels(StructuredMetadataLabel).Copy() + parsed := b.labels(ParsedLabel).Copy() + b.buf = flattenLabels(b.buf, stream, structuredMetadata, parsed) + hash := b.hasher.Hash(b.buf) + if cached, ok := b.resultCache[hash]; ok { + return cached + } + + result := NewLabelsResult(b.buf.String(), hash, stream, structuredMetadata, parsed) + b.resultCache[hash] = result + + return result +} + +func flattenLabels(buf labels.Labels, many ...labels.Labels) labels.Labels { + var size int + for _, lbls := range many { + size += len(lbls) + } + + if buf == nil || cap(buf) < size { + buf = make(labels.Labels, 0, size) + } else { + buf = buf[:0] + } + + for _, lbls := range many { + buf = append(buf, lbls...) + } + sort.Sort(buf) + return buf +} + +func (b *BaseLabelsBuilder) toUncategorizedResult(buf labels.Labels) LabelsResult { + hash := b.hasher.Hash(buf) + if cached, ok := b.resultCache[hash]; ok { + return cached + } + + res := NewLabelsResult(buf.String(), hash, buf.Copy(), nil, nil) + b.resultCache[hash] = res + return res +} + +// GroupedLabels returns the LabelsResult from the builder. +// Groups are applied and the cache is used when possible. +func (b *LabelsBuilder) GroupedLabels() LabelsResult { + if b.HasErr() { + // We need to return now before applying grouping otherwise the error might get lost. + return b.LabelsResult() + } + if b.noLabels { + return EmptyLabelsResult + } + // unchanged path. 
+ if !b.hasDel() && !b.hasAdd() { + if len(b.groups) == 0 { + return b.currentResult + } + return b.toBaseGroup() + } + // no grouping + if len(b.groups) == 0 { + return b.LabelsResult() + } + + if b.without { + return b.withoutResult() + } + return b.withResult() +} + +func (b *LabelsBuilder) withResult() LabelsResult { + if b.buf == nil { + b.buf = make(labels.Labels, 0, len(b.groups)) + } else { + b.buf = b.buf[:0] + } +Outer: + for _, g := range b.groups { + for _, n := range b.del { + if g == n { + continue Outer + } + } + for _, la := range b.add { + for _, l := range la { + if g == l.Name { + b.buf = append(b.buf, l) + continue Outer + } + } + } + for _, l := range b.base { + if g == l.Name { + b.buf = append(b.buf, l) + continue Outer + } + } + } + return b.toUncategorizedResult(b.buf) +} + +func (b *LabelsBuilder) withoutResult() LabelsResult { + if b.buf == nil { + size := len(b.base) + b.sizeAdd() - len(b.del) - len(b.groups) + if size < 0 { + size = 0 + } + b.buf = make(labels.Labels, 0, size) + } else { + b.buf = b.buf[:0] + } +Outer: + for _, l := range b.base { + for _, n := range b.del { + if l.Name == n { + continue Outer + } + } + for _, lbls := range b.add { + for _, la := range lbls { + if l.Name == la.Name { + continue Outer + } + } + } + for _, lg := range b.groups { + if l.Name == lg { + continue Outer + } + } + b.buf = append(b.buf, l) + } + + for _, lbls := range b.add { + OuterAdd: + for _, la := range lbls { + for _, lg := range b.groups { + if la.Name == lg { + continue OuterAdd + } + } + b.buf = append(b.buf, la) + } + } + sort.Sort(b.buf) + return b.toUncategorizedResult(b.buf) +} + +func (b *LabelsBuilder) toBaseGroup() LabelsResult { + if b.groupedResult != nil { + return b.groupedResult + } + var lbs labels.Labels + if b.without { + lbs = labels.NewBuilder(b.base).Del(b.groups...).Labels() + } else { + lbs = labels.NewBuilder(b.base).Keep(b.groups...).Labels() + } + res := NewLabelsResult(lbs.String(), lbs.Hash(), lbs, nil, nil) + b.groupedResult = res + return res +} + +type internedStringSet map[string]struct { + s string + ok bool +} + +func (i internedStringSet) Get(data []byte, createNew func() (string, bool)) (string, bool) { + s, ok := i[string(data)] + if ok { + return s.s, s.ok + } + newStr, ok := createNew() + if len(i) >= MaxInternedStrings { + return newStr, ok + } + i[string(data)] = struct { + s string + ok bool + }{s: newStr, ok: ok} + return newStr, ok +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/decode.go b/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/decode.go new file mode 100644 index 00000000..bb9d2b85 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/decode.go @@ -0,0 +1,231 @@ +// Adapted from https://github.com/go-logfmt/logfmt/ but []byte as parameter instead +// Original license is MIT. +package logfmt + +import ( + "bytes" + "fmt" + "unicode/utf8" +) + +// A Decoder reads and decodes logfmt records from an input stream. +type Decoder struct { + pos int + key []byte + value []byte + line []byte + err error +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r beyond +// the logfmt records requested. 
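
The decoder below is a trimmed-down take on go-logfmt that works over a single, pre-split line. A minimal sketch of the scan loop a caller runs, assuming the vendored import path:

package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/log/logfmt"
)

func main() {
	dec := logfmt.NewDecoder([]byte(`level=error msg="dial failed" duration=1.5s`))
	for dec.ScanKeyval() {
		fmt.Printf("%s => %s\n", dec.Key(), dec.Value())
	}
	if err := dec.Err(); err != nil {
		fmt.Println(err) // non-nil on malformed input
	}
}
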
+func NewDecoder(line []byte) *Decoder { + dec := &Decoder{line: line} + return dec +} + +func (dec *Decoder) Reset(line []byte) { + dec.pos = 0 + dec.line = line + dec.err = nil +} + +func (dec *Decoder) EOL() bool { + return dec.pos >= len(dec.line) +} + +// ScanKeyval advances the Decoder to the next key/value pair of the current +// record, which can then be retrieved with the Key and Value methods. It +// returns false when decoding stops, either by reaching the end of the +// current record or an error. +func (dec *Decoder) ScanKeyval() bool { + dec.key, dec.value = nil, nil + + line := dec.line + + // garbage + for p, c := range line[dec.pos:] { + if c > ' ' { + dec.pos += p + goto key + } + } + dec.pos = len(line) + return false + +key: + const invalidKeyError = "invalid key" + + start, multibyte := dec.pos, false + for p, c := range line[dec.pos:] { + switch { + case c == '=': + dec.pos += p + if dec.pos > start { + dec.key = line[start:dec.pos] + if multibyte && bytes.ContainsRune(dec.key, utf8.RuneError) { + dec.syntaxError(invalidKeyError) + goto skip_value + } + } + if dec.key == nil { + dec.unexpectedByte(c) + goto skip_value + } + goto equal + case c == '"': + dec.pos += p + dec.unexpectedByte(c) + goto skip_value + case c <= ' ': + dec.pos += p + if dec.pos > start { + dec.key = line[start:dec.pos] + if multibyte && bytes.ContainsRune(dec.key, utf8.RuneError) { + dec.syntaxError(invalidKeyError) + return false + } + } + return true + case c >= utf8.RuneSelf: + multibyte = true + } + } + dec.pos = len(line) + if dec.pos > start { + dec.key = line[start:dec.pos] + if multibyte && bytes.ContainsRune(dec.key, utf8.RuneError) { + dec.syntaxError(invalidKeyError) + return false + } + } + return true + +equal: + dec.pos++ + if dec.pos >= len(line) { + return true + } + switch c := line[dec.pos]; { + case c <= ' ': + return true + case c == '"': + goto qvalue + } + + // value + start = dec.pos + for p, c := range line[dec.pos:] { + switch { + case c == '=' || c == '"': + dec.pos += p + dec.unexpectedByte(c) + goto skip_value + case c <= ' ': + dec.pos += p + if dec.pos > start { + dec.value = line[start:dec.pos] + } + return true + } + } + dec.pos = len(line) + if dec.pos > start { + dec.value = line[start:dec.pos] + } + return true + +skip_value: + for p, c := range line[dec.pos:] { + if c <= ' ' { + dec.pos += p + return false + } + } + + dec.pos = len(line) + return false + +qvalue: + const ( + untermQuote = "unterminated quoted value" + invalidQuote = "invalid quoted value" + ) + + hasEsc, esc := false, false + start = dec.pos + for p, c := range line[dec.pos+1:] { + switch { + case esc: + esc = false + case c == '\\': + hasEsc, esc = true, true + case c == '"': + dec.pos += p + 2 + if hasEsc { + v, ok := unquoteBytes(line[start:dec.pos]) + if !ok { + dec.syntaxError(invalidQuote) + return false + } + dec.value = v + } else { + start++ + end := dec.pos - 1 + if end > start { + dec.value = line[start:end] + } + } + return true + } + } + dec.pos = len(line) + dec.syntaxError(untermQuote) + return false +} + +// Key returns the most recent key found by a call to ScanKeyval. The returned +// slice may point to internal buffers and is only valid until the next call +// to ScanRecord. It does no allocation. +func (dec *Decoder) Key() []byte { + return dec.key +} + +// Value returns the most recent value found by a call to ScanKeyval. The +// returned slice may point to internal buffers and is only valid until the +// next call to ScanRecord. 
It does no allocation when the value has no +// escape sequences. +func (dec *Decoder) Value() []byte { + return dec.value +} + +// Err returns the first non-EOF error that was encountered by the Scanner. +func (dec *Decoder) Err() error { + return dec.err +} + +func (dec *Decoder) syntaxError(msg string) { + dec.err = &SyntaxError{ + Msg: msg, + Pos: dec.pos + 1, + } +} + +func (dec *Decoder) unexpectedByte(c byte) { + dec.err = &SyntaxError{ + Msg: fmt.Sprintf("unexpected %q", c), + Pos: dec.pos + 1, + } +} + +// A SyntaxError represents a syntax error in the logfmt input stream. +type SyntaxError struct { + Msg string + Pos int +} + +func (e *SyntaxError) Error() string { + return fmt.Sprintf("logfmt syntax error at pos %d : %s", e.Pos, e.Msg) +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/jsonstring.go b/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/jsonstring.go new file mode 100644 index 00000000..d2482809 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/jsonstring.go @@ -0,0 +1,139 @@ +package logfmt + +import ( + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Taken from Go's encoding/json and modified for use here. + +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Unescaped quote is invalid. + case c == '"': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. 
+ default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/lexer.go b/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/lexer.go new file mode 100644 index 00000000..06756c0b --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/lexer.go @@ -0,0 +1,125 @@ +package logfmt + +import ( + "bufio" + "fmt" + "io" + "text/scanner" +) + +type Scanner struct { + buf *bufio.Reader + data []interface{} + err error + debug bool +} + +func NewScanner(r io.Reader, debug bool) *Scanner { + return &Scanner{ + buf: bufio.NewReader(r), + debug: debug, + } +} + +func (sc *Scanner) Error(s string) { + sc.err = fmt.Errorf(s) + fmt.Printf("syntax error: %s\n", s) +} + +func (sc *Scanner) Reduced(rule, state int, lval *LogfmtExprSymType) bool { + if sc.debug { + fmt.Printf("rule: %v; state %v; lval: %v\n", rule, state, lval) + } + return false +} + +func (sc *Scanner) Lex(lval *LogfmtExprSymType) int { + return sc.lex(lval) +} + +func (sc *Scanner) lex(lval *LogfmtExprSymType) int { + for { + r := sc.read() + + if r == 0 { + return 0 + } + if isWhitespace(r) { + continue + } + + switch true { + case isStartIdentifier(r): + sc.unread() + lval.key = sc.scanField() + return KEY + case r == '"': + sc.unread() + lval.str = sc.scanStr() + return STRING + default: + sc.err = fmt.Errorf("unexpected char %c", r) + return 0 + } + } +} + +func isStartIdentifier(r rune) bool { + return (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || r == '_' +} + +func isIdentifier(r rune) bool { + return isStartIdentifier(r) || (r >= '0' && r <= '9') +} + +func (sc *Scanner) scanField() string { + var str []rune + + for { + r := sc.read() + if !isIdentifier(r) || isEndOfInput(r) { + sc.unread() + break + } + + str = append(str, r) + } + return string(str) +} + +// input is either terminated by EOF or null byte +func isEndOfInput(r rune) bool { + return r == scanner.EOF || r == rune(0) +} + +func (sc *Scanner) read() rune { + ch, _, _ := sc.buf.ReadRune() + return ch +} + +func (sc *Scanner) scanStr() string { + var str []rune + //begin with ", end with " + r := sc.read() + if r != '"' { + sc.err = fmt.Errorf("unexpected char %c", r) + return "" + } + + for { + r := sc.read() + if isEndOfInput(r) { + break + } + + if r == '"' || r == ']' { + break + } + str = append(str, r) + } + return string(str) +} + +func (sc *Scanner) unread() { _ = sc.buf.UnreadRune() } + +func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' } diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/logfmtexpr.y b/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/logfmtexpr.y new file mode 100644 index 00000000..12660f31 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/logfmtexpr.y @@ -0,0 +1,39 @@ + + +%{ +package logfmt + +func setScannerData(lex interface{}, data []interface{}) { + lex.(*Scanner).data = data +} + +%} + +%union { + str string + key string + list []interface{} +} + +%token STRING +%token KEY + +%type key value +%type expressions + +%% + +logfmt: + expressions { setScannerData(LogfmtExprlex, $1) } + +expressions: + key { $$ = []interface{}{$1} } + | value { $$ = []interface{}{$1} } + | expressions value { $$ = append($1, $2) } + ; + +key: + KEY { $$ = $1 } + +value: + STRING { $$ = $1 } \ No newline at end of file diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/logfmtexpr.y.go 
b/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/logfmtexpr.y.go new file mode 100644 index 00000000..77eb1d23 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/logfmtexpr.y.go @@ -0,0 +1,465 @@ +// Code generated by goyacc -p LogfmtExpr -o logfmtexpr.y.go logfmtexpr.y. DO NOT EDIT. + +//line logfmtexpr.y:4 +package logfmt + +import __yyfmt__ "fmt" + +//line logfmtexpr.y:4 + +func setScannerData(lex interface{}, data []interface{}) { + lex.(*Scanner).data = data +} + +//line logfmtexpr.y:12 +type LogfmtExprSymType struct { + yys int + str string + key string + list []interface{} +} + +const STRING = 57346 +const KEY = 57347 + +var LogfmtExprToknames = [...]string{ + "$end", + "error", + "$unk", + "STRING", + "KEY", +} + +var LogfmtExprStatenames = [...]string{} + +const LogfmtExprEofCode = 1 +const LogfmtExprErrCode = 2 +const LogfmtExprInitialStackSize = 16 + +//line yacctab:1 +var LogfmtExprExca = [...]int8{ + -1, 1, + 1, -1, + -2, 0, +} + +const LogfmtExprPrivate = 57344 + +const LogfmtExprLast = 8 + +var LogfmtExprAct = [...]int8{ + 6, 5, 6, 4, 1, 2, 7, 3, +} + +var LogfmtExprPact = [...]int16{ + -4, -1000, -2, -1000, -1000, -1000, -1000, -1000, +} + +var LogfmtExprPgo = [...]int8{ + 0, 7, 3, 5, 4, +} + +var LogfmtExprR1 = [...]int8{ + 0, 4, 3, 3, 3, 1, 2, +} + +var LogfmtExprR2 = [...]int8{ + 0, 1, 1, 1, 2, 1, 1, +} + +var LogfmtExprChk = [...]int16{ + -1000, -4, -3, -1, -2, 5, 4, -2, +} + +var LogfmtExprDef = [...]int8{ + 0, -2, 1, 2, 3, 5, 6, 4, +} + +var LogfmtExprTok1 = [...]int8{ + 1, +} + +var LogfmtExprTok2 = [...]int8{ + 2, 3, 4, 5, +} + +var LogfmtExprTok3 = [...]int8{ + 0, +} + +var LogfmtExprErrorMessages = [...]struct { + state int + token int + msg string +}{} + +//line yaccpar:1 + +/* parser for yacc output */ + +var ( + LogfmtExprDebug = 0 + LogfmtExprErrorVerbose = false +) + +type LogfmtExprLexer interface { + Lex(lval *LogfmtExprSymType) int + Error(s string) +} + +type LogfmtExprParser interface { + Parse(LogfmtExprLexer) int + Lookahead() int +} + +type LogfmtExprParserImpl struct { + lval LogfmtExprSymType + stack [LogfmtExprInitialStackSize]LogfmtExprSymType + char int +} + +func (p *LogfmtExprParserImpl) Lookahead() int { + return p.char +} + +func LogfmtExprNewParser() LogfmtExprParser { + return &LogfmtExprParserImpl{} +} + +const LogfmtExprFlag = -1000 + +func LogfmtExprTokname(c int) string { + if c >= 1 && c-1 < len(LogfmtExprToknames) { + if LogfmtExprToknames[c-1] != "" { + return LogfmtExprToknames[c-1] + } + } + return __yyfmt__.Sprintf("tok-%v", c) +} + +func LogfmtExprStatname(s int) string { + if s >= 0 && s < len(LogfmtExprStatenames) { + if LogfmtExprStatenames[s] != "" { + return LogfmtExprStatenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func LogfmtExprErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !LogfmtExprErrorVerbose { + return "syntax error" + } + + for _, e := range LogfmtExprErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + LogfmtExprTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. 
+ base := int(LogfmtExprPact[state]) + for tok := TOKSTART; tok-1 < len(LogfmtExprToknames); tok++ { + if n := base + tok; n >= 0 && n < LogfmtExprLast && int(LogfmtExprChk[int(LogfmtExprAct[n])]) == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if LogfmtExprDef[state] == -2 { + i := 0 + for LogfmtExprExca[i] != -1 || int(LogfmtExprExca[i+1]) != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; LogfmtExprExca[i] >= 0; i += 2 { + tok := int(LogfmtExprExca[i]) + if tok < TOKSTART || LogfmtExprExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if LogfmtExprExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += LogfmtExprTokname(tok) + } + return res +} + +func LogfmtExprlex1(lex LogfmtExprLexer, lval *LogfmtExprSymType) (char, token int) { + token = 0 + char = lex.Lex(lval) + if char <= 0 { + token = int(LogfmtExprTok1[0]) + goto out + } + if char < len(LogfmtExprTok1) { + token = int(LogfmtExprTok1[char]) + goto out + } + if char >= LogfmtExprPrivate { + if char < LogfmtExprPrivate+len(LogfmtExprTok2) { + token = int(LogfmtExprTok2[char-LogfmtExprPrivate]) + goto out + } + } + for i := 0; i < len(LogfmtExprTok3); i += 2 { + token = int(LogfmtExprTok3[i+0]) + if token == char { + token = int(LogfmtExprTok3[i+1]) + goto out + } + } + +out: + if token == 0 { + token = int(LogfmtExprTok2[1]) /* unknown char */ + } + if LogfmtExprDebug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", LogfmtExprTokname(token), uint(char)) + } + return char, token +} + +func LogfmtExprParse(LogfmtExprlex LogfmtExprLexer) int { + return LogfmtExprNewParser().Parse(LogfmtExprlex) +} + +func (LogfmtExprrcvr *LogfmtExprParserImpl) Parse(LogfmtExprlex LogfmtExprLexer) int { + var LogfmtExprn int + var LogfmtExprVAL LogfmtExprSymType + var LogfmtExprDollar []LogfmtExprSymType + _ = LogfmtExprDollar // silence set and not used + LogfmtExprS := LogfmtExprrcvr.stack[:] + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + LogfmtExprstate := 0 + LogfmtExprrcvr.char = -1 + LogfmtExprtoken := -1 // LogfmtExprrcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when not parsing. 
+ LogfmtExprstate = -1 + LogfmtExprrcvr.char = -1 + LogfmtExprtoken = -1 + }() + LogfmtExprp := -1 + goto LogfmtExprstack + +ret0: + return 0 + +ret1: + return 1 + +LogfmtExprstack: + /* put a state and value onto the stack */ + if LogfmtExprDebug >= 4 { + __yyfmt__.Printf("char %v in %v\n", LogfmtExprTokname(LogfmtExprtoken), LogfmtExprStatname(LogfmtExprstate)) + } + + LogfmtExprp++ + if LogfmtExprp >= len(LogfmtExprS) { + nyys := make([]LogfmtExprSymType, len(LogfmtExprS)*2) + copy(nyys, LogfmtExprS) + LogfmtExprS = nyys + } + LogfmtExprS[LogfmtExprp] = LogfmtExprVAL + LogfmtExprS[LogfmtExprp].yys = LogfmtExprstate + +LogfmtExprnewstate: + LogfmtExprn = int(LogfmtExprPact[LogfmtExprstate]) + if LogfmtExprn <= LogfmtExprFlag { + goto LogfmtExprdefault /* simple state */ + } + if LogfmtExprrcvr.char < 0 { + LogfmtExprrcvr.char, LogfmtExprtoken = LogfmtExprlex1(LogfmtExprlex, &LogfmtExprrcvr.lval) + } + LogfmtExprn += LogfmtExprtoken + if LogfmtExprn < 0 || LogfmtExprn >= LogfmtExprLast { + goto LogfmtExprdefault + } + LogfmtExprn = int(LogfmtExprAct[LogfmtExprn]) + if int(LogfmtExprChk[LogfmtExprn]) == LogfmtExprtoken { /* valid shift */ + LogfmtExprrcvr.char = -1 + LogfmtExprtoken = -1 + LogfmtExprVAL = LogfmtExprrcvr.lval + LogfmtExprstate = LogfmtExprn + if Errflag > 0 { + Errflag-- + } + goto LogfmtExprstack + } + +LogfmtExprdefault: + /* default state action */ + LogfmtExprn = int(LogfmtExprDef[LogfmtExprstate]) + if LogfmtExprn == -2 { + if LogfmtExprrcvr.char < 0 { + LogfmtExprrcvr.char, LogfmtExprtoken = LogfmtExprlex1(LogfmtExprlex, &LogfmtExprrcvr.lval) + } + + /* look through exception table */ + xi := 0 + for { + if LogfmtExprExca[xi+0] == -1 && int(LogfmtExprExca[xi+1]) == LogfmtExprstate { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + LogfmtExprn = int(LogfmtExprExca[xi+0]) + if LogfmtExprn < 0 || LogfmtExprn == LogfmtExprtoken { + break + } + } + LogfmtExprn = int(LogfmtExprExca[xi+1]) + if LogfmtExprn < 0 { + goto ret0 + } + } + if LogfmtExprn == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + LogfmtExprlex.Error(LogfmtExprErrorMessage(LogfmtExprstate, LogfmtExprtoken)) + Nerrs++ + if LogfmtExprDebug >= 1 { + __yyfmt__.Printf("%s", LogfmtExprStatname(LogfmtExprstate)) + __yyfmt__.Printf(" saw %s\n", LogfmtExprTokname(LogfmtExprtoken)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for LogfmtExprp >= 0 { + LogfmtExprn = int(LogfmtExprPact[LogfmtExprS[LogfmtExprp].yys]) + LogfmtExprErrCode + if LogfmtExprn >= 0 && LogfmtExprn < LogfmtExprLast { + LogfmtExprstate = int(LogfmtExprAct[LogfmtExprn]) /* simulate a shift of "error" */ + if int(LogfmtExprChk[LogfmtExprstate]) == LogfmtExprErrCode { + goto LogfmtExprstack + } + } + + /* the current p has no shift on "error", pop stack */ + if LogfmtExprDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", LogfmtExprS[LogfmtExprp].yys) + } + LogfmtExprp-- + } + /* there is no state on the stack with an error shift ... 
abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if LogfmtExprDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", LogfmtExprTokname(LogfmtExprtoken)) + } + if LogfmtExprtoken == LogfmtExprEofCode { + goto ret1 + } + LogfmtExprrcvr.char = -1 + LogfmtExprtoken = -1 + goto LogfmtExprnewstate /* try again in the same state */ + } + } + + /* reduction by production LogfmtExprn */ + if LogfmtExprDebug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", LogfmtExprn, LogfmtExprStatname(LogfmtExprstate)) + } + + LogfmtExprnt := LogfmtExprn + LogfmtExprpt := LogfmtExprp + _ = LogfmtExprpt // guard against "declared and not used" + + LogfmtExprp -= int(LogfmtExprR2[LogfmtExprn]) + // LogfmtExprp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. + if LogfmtExprp+1 >= len(LogfmtExprS) { + nyys := make([]LogfmtExprSymType, len(LogfmtExprS)*2) + copy(nyys, LogfmtExprS) + LogfmtExprS = nyys + } + LogfmtExprVAL = LogfmtExprS[LogfmtExprp+1] + + /* consult goto table to find next state */ + LogfmtExprn = int(LogfmtExprR1[LogfmtExprn]) + LogfmtExprg := int(LogfmtExprPgo[LogfmtExprn]) + LogfmtExprj := LogfmtExprg + LogfmtExprS[LogfmtExprp].yys + 1 + + if LogfmtExprj >= LogfmtExprLast { + LogfmtExprstate = int(LogfmtExprAct[LogfmtExprg]) + } else { + LogfmtExprstate = int(LogfmtExprAct[LogfmtExprj]) + if int(LogfmtExprChk[LogfmtExprstate]) != -LogfmtExprn { + LogfmtExprstate = int(LogfmtExprAct[LogfmtExprg]) + } + } + // dummy call; replaced with literal code + switch LogfmtExprnt { + + case 1: + LogfmtExprDollar = LogfmtExprS[LogfmtExprpt-1 : LogfmtExprpt+1] +//line logfmtexpr.y:27 + { + setScannerData(LogfmtExprlex, LogfmtExprDollar[1].list) + } + case 2: + LogfmtExprDollar = LogfmtExprS[LogfmtExprpt-1 : LogfmtExprpt+1] +//line logfmtexpr.y:30 + { + LogfmtExprVAL.list = []interface{}{LogfmtExprDollar[1].str} + } + case 3: + LogfmtExprDollar = LogfmtExprS[LogfmtExprpt-1 : LogfmtExprpt+1] +//line logfmtexpr.y:31 + { + LogfmtExprVAL.list = []interface{}{LogfmtExprDollar[1].str} + } + case 4: + LogfmtExprDollar = LogfmtExprS[LogfmtExprpt-2 : LogfmtExprpt+1] +//line logfmtexpr.y:32 + { + LogfmtExprVAL.list = append(LogfmtExprDollar[1].list, LogfmtExprDollar[2].str) + } + case 5: + LogfmtExprDollar = LogfmtExprS[LogfmtExprpt-1 : LogfmtExprpt+1] +//line logfmtexpr.y:36 + { + LogfmtExprVAL.str = LogfmtExprDollar[1].key + } + case 6: + LogfmtExprDollar = LogfmtExprS[LogfmtExprpt-1 : LogfmtExprpt+1] +//line logfmtexpr.y:39 + { + LogfmtExprVAL.str = LogfmtExprDollar[1].str + } + } + goto LogfmtExprstack /* stack new state and value */ +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/parser.go b/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/parser.go new file mode 100644 index 00000000..5a72ce1d --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/logfmt/parser.go @@ -0,0 +1,19 @@ +package logfmt + +import ( + "strings" +) + +func init() { + LogfmtExprErrorVerbose = true +} + +func Parse(expr string, debug bool) ([]interface{}, error) { + s := NewScanner(strings.NewReader(expr), debug) + LogfmtExprParse(s) + + if s.err != nil { + return nil, s.err + } + return s.data, nil +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/metrics_extraction.go b/vendor/github.com/grafana/loki/pkg/logql/log/metrics_extraction.go new file mode 100644 index 00000000..cd4ef3b8 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/metrics_extraction.go @@ -0,0 +1,302 @@ +package log 
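
The file this next hunk opens, metrics_extraction.go, is the bridge from log lines to sample values: a `LineExtractor` maps a line to a float64, so `count_over_time` scores each line as 1 while `bytes_over_time` scores it by its length. A self-contained sketch, with the two extractors re-declared locally so the snippet runs on its own:

package main

import "fmt"

// lineExtractor mirrors the LineExtractor type declared below:
// count-style queries score every line as 1, bytes-style queries
// score a line by its length.
type lineExtractor func([]byte) float64

var (
	countEx lineExtractor = func(line []byte) float64 { return 1. }
	bytesEx lineExtractor = func(line []byte) float64 { return float64(len(line)) }
)

func main() {
	line := []byte(`level=error msg="dial failed"`)
	fmt.Println(countEx(line)) // 1
	fmt.Println(bytesEx(line)) // 29
}
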
+ +import ( + "sort" + "strconv" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/labels" + + "github.com/dustin/go-humanize" +) + +const ( + ConvertBytes = "bytes" + ConvertDuration = "duration" + ConvertFloat = "float" +) + +// LineExtractor extracts a float64 from a log line. +type LineExtractor func([]byte) float64 + +var ( + CountExtractor LineExtractor = func(line []byte) float64 { return 1. } + BytesExtractor LineExtractor = func(line []byte) float64 { return float64(len(line)) } +) + +// SampleExtractor creates StreamSampleExtractor that can extract samples for a given log stream. +type SampleExtractor interface { + ForStream(labels labels.Labels) StreamSampleExtractor +} + +// StreamSampleExtractor extracts sample for a log line. +// A StreamSampleExtractor never mutate the received line. +type StreamSampleExtractor interface { + BaseLabels() LabelsResult + Process(ts int64, line []byte, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) + ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) +} + +type lineSampleExtractor struct { + Stage + LineExtractor + + baseBuilder *BaseLabelsBuilder + streamExtractors map[uint64]StreamSampleExtractor +} + +// NewLineSampleExtractor creates a SampleExtractor from a LineExtractor. +// Multiple log stages are run before converting the log line. +func NewLineSampleExtractor(ex LineExtractor, stages []Stage, groups []string, without, noLabels bool) (SampleExtractor, error) { + s := ReduceStages(stages) + hints := NewParserHint(s.RequiredLabelNames(), groups, without, noLabels, "", stages) + return &lineSampleExtractor{ + Stage: s, + LineExtractor: ex, + baseBuilder: NewBaseLabelsBuilderWithGrouping(groups, hints, without, noLabels), + streamExtractors: make(map[uint64]StreamSampleExtractor), + }, nil +} + +func (l *lineSampleExtractor) ForStream(labels labels.Labels) StreamSampleExtractor { + hash := l.baseBuilder.Hash(labels) + if res, ok := l.streamExtractors[hash]; ok { + return res + } + + res := &streamLineSampleExtractor{ + Stage: l.Stage, + LineExtractor: l.LineExtractor, + builder: l.baseBuilder.ForLabels(labels, hash), + } + l.streamExtractors[hash] = res + return res +} + +type streamLineSampleExtractor struct { + Stage + LineExtractor + builder *LabelsBuilder +} + +func (l *streamLineSampleExtractor) Process(ts int64, line []byte, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) { + l.builder.Reset() + l.builder.Add(StructuredMetadataLabel, structuredMetadata...) + + // short circuit. + if l.Stage == NoopStage { + return l.LineExtractor(line), l.builder.GroupedLabels(), true + } + + line, ok := l.Stage.Process(ts, line, l.builder) + if !ok { + return 0, nil, false + } + return l.LineExtractor(line), l.builder.GroupedLabels(), true +} + +func (l *streamLineSampleExtractor) ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) { + // unsafe get bytes since we have the guarantee that the line won't be mutated. + return l.Process(ts, unsafeGetBytes(line), structuredMetadata...) 
+}
+
+func (l *streamLineSampleExtractor) BaseLabels() LabelsResult { return l.builder.currentResult }
+
+type convertionFn func(value string) (float64, error)
+
+type labelSampleExtractor struct {
+	preStage     Stage
+	postFilter   Stage
+	labelName    string
+	conversionFn convertionFn
+
+	baseBuilder      *BaseLabelsBuilder
+	streamExtractors map[uint64]StreamSampleExtractor
+}
+
+// LabelExtractorWithStages creates a SampleExtractor that will extract metrics from a label.
+// A set of log stages is executed before the conversion. A filtering stage is executed after the conversion,
+// allowing removal of samples containing the __error__ label.
+func LabelExtractorWithStages(
+	labelName, conversion string,
+	groups []string, without, noLabels bool,
+	preStages []Stage,
+	postFilter Stage,
+) (SampleExtractor, error) {
+	var convFn convertionFn
+	switch conversion {
+	case ConvertBytes:
+		convFn = convertBytes
+	case ConvertDuration:
+		convFn = convertDuration
+	case ConvertFloat:
+		convFn = convertFloat
+	default:
+		return nil, errors.Errorf("unsupported conversion operation %s", conversion)
+	}
+	if len(groups) == 0 || without {
+		without = true
+		groups = append(groups, labelName)
+		sort.Strings(groups)
+	}
+	preStage := ReduceStages(preStages)
+	hints := NewParserHint(append(preStage.RequiredLabelNames(), postFilter.RequiredLabelNames()...), groups, without, noLabels, labelName, append(preStages, postFilter))
+	return &labelSampleExtractor{
+		preStage:         preStage,
+		conversionFn:     convFn,
+		labelName:        labelName,
+		postFilter:       postFilter,
+		baseBuilder:      NewBaseLabelsBuilderWithGrouping(groups, hints, without, noLabels),
+		streamExtractors: make(map[uint64]StreamSampleExtractor),
+	}, nil
+}
+
+type streamLabelSampleExtractor struct {
+	*labelSampleExtractor
+	builder *LabelsBuilder
+}
+
+func (l *labelSampleExtractor) ForStream(labels labels.Labels) StreamSampleExtractor {
+	hash := l.baseBuilder.Hash(labels)
+	if res, ok := l.streamExtractors[hash]; ok {
+		return res
+	}
+
+	res := &streamLabelSampleExtractor{
+		labelSampleExtractor: l,
+		builder:              l.baseBuilder.ForLabels(labels, hash),
+	}
+	l.streamExtractors[hash] = res
+	return res
+}
+
+func (l *streamLabelSampleExtractor) Process(ts int64, line []byte, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) {
+	// Apply the pipeline first.
+	l.builder.Reset()
+	l.builder.Add(StructuredMetadataLabel, structuredMetadata...)
+	line, ok := l.preStage.Process(ts, line, l.builder)
+	if !ok {
+		return 0, nil, false
+	}
+	// convert the label value.
+	var v float64
+	stringValue, _ := l.builder.Get(l.labelName)
+	if stringValue == "" {
+		// NOTE: It's totally fine for a log line to not have this particular label.
+		// See Issue: https://github.com/grafana/loki/issues/6713
+		return 0, nil, false
+	}
+
+	var err error
+	v, err = l.conversionFn(stringValue)
+	if err != nil {
+		l.builder.SetErr(errSampleExtraction)
+		l.builder.SetErrorDetails(err.Error())
+	}
+
+	// post filters
+	if _, ok = l.postFilter.Process(ts, line, l.builder); !ok {
+		return 0, nil, false
+	}
+	return v, l.builder.GroupedLabels(), true
+}
+
+func (l *streamLabelSampleExtractor) ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) {
+	// unsafe get bytes since we have the guarantee that the line won't be mutated.
+	return l.Process(ts, unsafeGetBytes(line), structuredMetadata...)
+} + +func (l *streamLabelSampleExtractor) BaseLabels() LabelsResult { return l.builder.currentResult } + +// NewFilteringSampleExtractor creates a sample extractor where entries from +// the underlying log stream are filtered by pipeline filters before being +// passed to extract samples. Filters are always upstream of the extractor. +func NewFilteringSampleExtractor(f []PipelineFilter, e SampleExtractor) SampleExtractor { + return &filteringSampleExtractor{ + filters: f, + extractor: e, + } +} + +type filteringSampleExtractor struct { + filters []PipelineFilter + extractor SampleExtractor +} + +func (p *filteringSampleExtractor) ForStream(labels labels.Labels) StreamSampleExtractor { + var streamFilters []streamFilter + for _, f := range p.filters { + if allMatch(f.Matchers, labels) { + streamFilters = append(streamFilters, streamFilter{ + start: f.Start, + end: f.End, + pipeline: f.Pipeline.ForStream(labels), + }) + } + } + + return &filteringStreamExtractor{ + filters: streamFilters, + extractor: p.extractor.ForStream(labels), + } +} + +type filteringStreamExtractor struct { + filters []streamFilter + extractor StreamSampleExtractor +} + +func (sp *filteringStreamExtractor) BaseLabels() LabelsResult { + return sp.extractor.BaseLabels() +} + +func (sp *filteringStreamExtractor) Process(ts int64, line []byte, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) { + for _, filter := range sp.filters { + if ts < filter.start || ts > filter.end { + continue + } + + _, _, matches := filter.pipeline.Process(ts, line, structuredMetadata...) + if matches { // When the filter matches, don't run the next step + return 0, nil, false + } + } + + return sp.extractor.Process(ts, line) +} + +func (sp *filteringStreamExtractor) ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) { + for _, filter := range sp.filters { + if ts < filter.start || ts > filter.end { + continue + } + + _, _, matches := filter.pipeline.ProcessString(ts, line, structuredMetadata...) 
+ if matches { // When the filter matches, don't run the next step + return 0, nil, false + } + } + + return sp.extractor.ProcessString(ts, line) +} + +func convertFloat(v string) (float64, error) { + return strconv.ParseFloat(v, 64) +} + +func convertDuration(v string) (float64, error) { + d, err := time.ParseDuration(v) + if err != nil { + return 0, err + } + return d.Seconds(), nil +} + +func convertBytes(v string) (float64, error) { + b, err := humanize.ParseBytes(v) + if err != nil { + return 0, err + } + return float64(b), nil +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/parser.go b/vendor/github.com/grafana/loki/pkg/logql/log/parser.go new file mode 100644 index 00000000..be059a28 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/parser.go @@ -0,0 +1,756 @@ +package log + +import ( + "bytes" + "errors" + "fmt" + "unicode/utf8" + + "github.com/buger/jsonparser" + + "github.com/grafana/loki/pkg/logql/log/jsonexpr" + "github.com/grafana/loki/pkg/logql/log/logfmt" + "github.com/grafana/loki/pkg/logql/log/pattern" + "github.com/grafana/loki/pkg/logqlmodel" + + "github.com/grafana/regexp" + jsoniter "github.com/json-iterator/go" + "github.com/prometheus/common/model" +) + +const ( + jsonSpacer = '_' + duplicateSuffix = "_extracted" + trueString = "true" + falseString = "false" + // How much stack space to allocate for unescaping JSON strings; if a string longer + // than this needs to be escaped, it will result in a heap allocation + unescapeStackBufSize = 64 +) + +var ( + _ Stage = &JSONParser{} + _ Stage = &RegexpParser{} + _ Stage = &LogfmtParser{} + + trueBytes = []byte("true") + + errUnexpectedJSONObject = fmt.Errorf("expecting json object(%d), but it is not", jsoniter.ObjectValue) + errMissingCapture = errors.New("at least one named capture must be supplied") + errFoundAllLabels = errors.New("found all required labels") + errLabelDoesNotMatch = errors.New("found a label with a matcher that didn't match") +) + +type JSONParser struct { + prefixBuffer []byte // buffer used to build json keys + lbs *LabelsBuilder + + keys internedStringSet + parserHints ParserHint +} + +// NewJSONParser creates a log stage that can parse a json log line and add properties as labels. +func NewJSONParser() *JSONParser { + return &JSONParser{ + prefixBuffer: make([]byte, 0, 1024), + keys: internedStringSet{}, + } +} + +func (j *JSONParser) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + parserHints := lbs.ParserLabelHints() + if parserHints.NoLabels() { + return line, true + } + + // reset the state. + j.prefixBuffer = j.prefixBuffer[:0] + j.lbs = lbs + j.parserHints = parserHints + + if err := jsonparser.ObjectEach(line, j.parseObject); err != nil { + if errors.Is(err, errFoundAllLabels) { + // Short-circuited + return line, true + } + + if errors.Is(err, errLabelDoesNotMatch) { + // one of the label matchers does not match. The whole line can be thrown away + return line, false + } + + addErrLabel(errJSON, err, lbs) + + return line, true + } + return line, true +} + +func (j *JSONParser) parseObject(key, value []byte, dataType jsonparser.ValueType, _ int) error { + var err error + switch dataType { + case jsonparser.String, jsonparser.Number, jsonparser.Boolean: + err = j.parseLabelValue(key, value, dataType) + case jsonparser.Object: + prefixLen := len(j.prefixBuffer) + if ok := j.nextKeyPrefix(key); ok { + err = jsonparser.ObjectEach(value, j.parseObject) + } + // rollback the prefix as we exit the current object. 
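For reference, the three conversions above defer to standard parsers; a tiny standalone sketch of what each accepts (values illustrative):

    package main

    import (
        "fmt"
        "strconv"
        "time"

        "github.com/dustin/go-humanize"
    )

    func main() {
        f, _ := strconv.ParseFloat("3.14", 64) // ConvertFloat
        d, _ := time.ParseDuration("250ms")    // ConvertDuration
        b, _ := humanize.ParseBytes("1MiB")    // ConvertBytes
        fmt.Println(f, d.Seconds(), b)         // 3.14 0.25 1048576
    }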
+ j.prefixBuffer = j.prefixBuffer[:prefixLen] + return err + } + + if j.parserHints.AllRequiredExtracted() { + // Not actually an error. Parsing can be short-circuited + // and this tells jsonparser to stop parsing + return errFoundAllLabels + } + + return err +} + +// nextKeyPrefix load the next prefix in the buffer and tells if it should be processed based on hints. +func (j *JSONParser) nextKeyPrefix(key []byte) bool { + // first add the spacer if needed. + if len(j.prefixBuffer) != 0 { + j.prefixBuffer = append(j.prefixBuffer, byte(jsonSpacer)) + } + j.prefixBuffer = appendSanitized(j.prefixBuffer, key) + return j.lbs.ParserLabelHints().ShouldExtractPrefix(unsafeGetString(j.prefixBuffer)) +} + +func (j *JSONParser) parseLabelValue(key, value []byte, dataType jsonparser.ValueType) error { + // the first time we use the field as label key. + if len(j.prefixBuffer) == 0 { + key, ok := j.keys.Get(key, func() (string, bool) { + field := sanitizeLabelKey(string(key), true) + if j.lbs.BaseHas(field) { + field = field + duplicateSuffix + } + if !j.lbs.ParserLabelHints().ShouldExtract(field) { + return "", false + } + return field, true + }) + if !ok { + return nil + } + j.lbs.Set(ParsedLabel, key, readValue(value, dataType)) + if !j.parserHints.ShouldContinueParsingLine(key, j.lbs) { + return errLabelDoesNotMatch + } + return nil + + } + // otherwise we build the label key using the buffer + + // snapshot the current prefix position + prefixLen := len(j.prefixBuffer) + j.prefixBuffer = append(j.prefixBuffer, byte(jsonSpacer)) + j.prefixBuffer = appendSanitized(j.prefixBuffer, key) + keyString, ok := j.keys.Get(j.prefixBuffer, func() (string, bool) { + if j.lbs.BaseHas(string(j.prefixBuffer)) { + j.prefixBuffer = append(j.prefixBuffer, duplicateSuffix...) + } + if !j.parserHints.ShouldExtract(string(j.prefixBuffer)) { + return "", false + } + + return string(j.prefixBuffer), true + }) + + // reset the prefix position + j.prefixBuffer = j.prefixBuffer[:prefixLen] + if !ok { + return nil + } + + j.lbs.Set(ParsedLabel, keyString, readValue(value, dataType)) + if !j.parserHints.ShouldContinueParsingLine(keyString, j.lbs) { + return errLabelDoesNotMatch + } + return nil +} + +func (j *JSONParser) RequiredLabelNames() []string { return []string{} } + +func readValue(v []byte, dataType jsonparser.ValueType) string { + switch dataType { + case jsonparser.String: + return unescapeJSONString(v) + case jsonparser.Null: + return "" + case jsonparser.Number: + return string(v) + case jsonparser.Boolean: + if bytes.Equal(v, trueBytes) { + return trueString + } + return falseString + default: + return "" + } +} + +func unescapeJSONString(b []byte) string { + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + bU, err := jsonparser.Unescape(b, stackbuf[:]) + if err != nil { + return "" + } + res := string(bU) + // rune error is rejected by Prometheus + for _, r := range res { + if r == utf8.RuneError { + return "" + } + } + return res +} + +type RegexpParser struct { + regex *regexp.Regexp + nameIndex map[int]string + + keys internedStringSet +} + +// NewRegexpParser creates a new log stage that can extract labels from a log line using a regex expression. +// The regex expression must contains at least one named match. If the regex doesn't match the line is not filtered out. 
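A sketch of the JSON flattening described above. The builder plumbing mirrors what pipelines do internally; obtaining a *LabelsBuilder by hand via NewBaseLabelsBuilder is an assumption for illustration only:

    base := labels.FromStrings("app", "foo")
    builder := log.NewBaseLabelsBuilder()
    b := builder.ForLabels(base, builder.Hash(base))
    b.Reset()

    p := log.NewJSONParser()
    _, ok := p.Process(0, []byte(`{"pod":{"uuid":"abc"},"app":"bar"}`), b)
    // ok == true; nested keys flatten with '_' (pod_uuid="abc") and the key
    // colliding with the stream label "app" is stored as app_extracted="bar".
    _ = ok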
+func NewRegexpParser(re string) (*RegexpParser, error) { + regex, err := regexp.Compile(re) + if err != nil { + return nil, err + } + if regex.NumSubexp() == 0 { + return nil, errMissingCapture + } + nameIndex := map[int]string{} + uniqueNames := map[string]struct{}{} + for i, n := range regex.SubexpNames() { + if n != "" { + if !model.LabelName(n).IsValid() { + return nil, fmt.Errorf("invalid extracted label name '%s'", n) + } + if _, ok := uniqueNames[n]; ok { + return nil, fmt.Errorf("duplicate extracted label name '%s'", n) + } + nameIndex[i] = n + uniqueNames[n] = struct{}{} + } + } + if len(nameIndex) == 0 { + return nil, errMissingCapture + } + return &RegexpParser{ + regex: regex, + nameIndex: nameIndex, + keys: internedStringSet{}, + }, nil +} + +func (r *RegexpParser) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + parserHints := lbs.ParserLabelHints() + for i, value := range r.regex.FindSubmatch(line) { + if name, ok := r.nameIndex[i]; ok { + key, ok := r.keys.Get(unsafeGetBytes(name), func() (string, bool) { + sanitize := sanitizeLabelKey(name, true) + if len(sanitize) == 0 { + return "", false + } + if lbs.BaseHas(sanitize) { + sanitize = fmt.Sprintf("%s%s", sanitize, duplicateSuffix) + } + if !parserHints.ShouldExtract(sanitize) { + return "", false + } + + return sanitize, true + }) + if !ok { + continue + } + + lbs.Set(ParsedLabel, key, string(value)) + if !parserHints.ShouldContinueParsingLine(key, lbs) { + return line, false + } + } + } + return line, true +} + +func (r *RegexpParser) RequiredLabelNames() []string { return []string{} } + +type LogfmtParser struct { + strict bool + keepEmpty bool + dec *logfmt.Decoder + keys internedStringSet +} + +// NewLogfmtParser creates a parser that can extract labels from a logfmt log line. +// Each keyval is extracted into a respective label. +func NewLogfmtParser(strict, keepEmpty bool) *LogfmtParser { + return &LogfmtParser{ + strict: strict, + keepEmpty: keepEmpty, + dec: logfmt.NewDecoder(nil), + keys: internedStringSet{}, + } +} + +func (l *LogfmtParser) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + parserHints := lbs.ParserLabelHints() + if parserHints.NoLabels() { + return line, true + } + + l.dec.Reset(line) + for !l.dec.EOL() { + ok := l.dec.ScanKeyval() + if !ok { + // for strict parsing, do not continue on errs + if l.strict { + break + } + + continue + } + + key, ok := l.keys.Get(l.dec.Key(), func() (string, bool) { + sanitized := sanitizeLabelKey(string(l.dec.Key()), true) + if len(sanitized) == 0 { + return "", false + } + + if lbs.BaseHas(sanitized) { + sanitized = fmt.Sprintf("%s%s", sanitized, duplicateSuffix) + } + + if !parserHints.ShouldExtract(sanitized) { + return "", false + } + return sanitized, true + }) + if !ok { + continue + } + + val := l.dec.Value() + // the rune error replacement is rejected by Prometheus, so we skip it. 
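The regexp stage follows the same builder contract; a sketch with one named capture (expression and line illustrative), reusing b from the previous sketch:

    rp, err := log.NewRegexpParser(`status=(?P<status>\d+)`)
    if err != nil {
        panic(err) // e.g. no named capture, duplicate or invalid names
    }
    _, _ = rp.Process(0, []byte(`GET /health status=200`), b)
    // sets status="200"; a capture that collides with a stream label would
    // be stored with the _extracted suffix instead.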
+ if bytes.ContainsRune(val, utf8.RuneError) { + val = nil + } + + if !l.keepEmpty && len(val) == 0 { + continue + } + + lbs.Set(ParsedLabel, key, string(val)) + if !parserHints.ShouldContinueParsingLine(key, lbs) { + return line, false + } + + if parserHints.AllRequiredExtracted() { + break + } + } + + if l.strict && l.dec.Err() != nil { + addErrLabel(errLogfmt, l.dec.Err(), lbs) + + if !parserHints.ShouldContinueParsingLine(logqlmodel.ErrorLabel, lbs) { + return line, false + } + return line, true + } + + return line, true +} + +func (l *LogfmtParser) RequiredLabelNames() []string { return []string{} } + +type PatternParser struct { + matcher pattern.Matcher + names []string +} + +func NewPatternParser(pn string) (*PatternParser, error) { + m, err := pattern.New(pn) + if err != nil { + return nil, err + } + for _, name := range m.Names() { + if !model.LabelName(name).IsValid() { + return nil, fmt.Errorf("invalid capture label name '%s'", name) + } + } + return &PatternParser{ + matcher: m, + names: m.Names(), + }, nil +} + +func (l *PatternParser) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + parserHints := lbs.ParserLabelHints() + if parserHints.NoLabels() { + return line, true + } + matches := l.matcher.Matches(line) + names := l.names[:len(matches)] + for i, m := range matches { + name := names[i] + if lbs.BaseHas(name) { + name = name + duplicateSuffix + } + + if !parserHints.ShouldExtract(name) { + continue + } + + lbs.Set(ParsedLabel, name, string(m)) + if !parserHints.ShouldContinueParsingLine(name, lbs) { + return line, false + } + } + return line, true +} + +func (l *PatternParser) RequiredLabelNames() []string { return []string{} } + +type LogfmtExpressionParser struct { + expressions map[string][]interface{} + dec *logfmt.Decoder + keys internedStringSet + strict bool +} + +func NewLogfmtExpressionParser(expressions []LabelExtractionExpr, strict bool) (*LogfmtExpressionParser, error) { + if len(expressions) == 0 { + return nil, fmt.Errorf("no logfmt expression provided") + } + paths := make(map[string][]interface{}, len(expressions)) + + for _, exp := range expressions { + path, err := logfmt.Parse(exp.Expression, false) + if err != nil { + return nil, fmt.Errorf("cannot parse expression [%s]: %w", exp.Expression, err) + } + + if !model.LabelName(exp.Identifier).IsValid() { + return nil, fmt.Errorf("invalid extracted label name '%s'", exp.Identifier) + } + paths[exp.Identifier] = path + } + return &LogfmtExpressionParser{ + expressions: paths, + dec: logfmt.NewDecoder(nil), + keys: internedStringSet{}, + strict: strict, + }, nil +} + +func (l *LogfmtExpressionParser) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + // If there are no expressions, extract common labels + // and add the suffix "_extracted" + if len(l.expressions) == 0 { + return line, false + } + + if lbs.ParserLabelHints().NoLabels() { + return line, true + } + + // Create a map of every renamed label and its original name + // in order to retrieve it later in the extraction phase + keys := make(map[string]string, len(l.expressions)) + for id, paths := range l.expressions { + keys[id] = fmt.Sprintf("%v", paths...) 
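Pattern stages name captures with <...>, and <_> discards its match. A sketch (pattern and line illustrative), again reusing b:

    pp, err := log.NewPatternParser("<ip> - <_> [<ts>]")
    if err != nil {
        panic(err)
    }
    _, _ = pp.Process(0, []byte(`1.2.3.4 - frank [10/Oct/2000]`), b)
    // sets ip="1.2.3.4" and ts="10/Oct/2000"; the <_> capture is dropped.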
+ if !lbs.BaseHas(id) { + lbs.Set(ParsedLabel, id, "") + } + } + + l.dec.Reset(line) + var current []byte + for !l.dec.EOL() { + ok := l.dec.ScanKeyval() + if !ok { + // for strict parsing, do not continue on errs + if l.strict { + break + } + + continue + } + + current = l.dec.Key() + key, ok := l.keys.Get(current, func() (string, bool) { + sanitized := sanitizeLabelKey(string(current), true) + if len(sanitized) == 0 { + return "", false + } + + if !lbs.ParserLabelHints().ShouldExtract(sanitized) { + return "", false + } + return sanitized, true + }) + if !ok { + continue + } + + val := l.dec.Value() + if bytes.ContainsRune(val, utf8.RuneError) { + val = nil + } + + for id, orig := range keys { + if key == orig { + key = id + break + } + } + + if _, ok := l.expressions[key]; ok { + if lbs.BaseHas(key) { + key = key + duplicateSuffix + if !lbs.ParserLabelHints().ShouldExtract(key) { + // Don't extract duplicates if we don't have to + break + } + } + + lbs.Set(ParsedLabel, key, string(val)) + + if lbs.ParserLabelHints().AllRequiredExtracted() { + break + } + } + } + if l.strict && l.dec.Err() != nil { + addErrLabel(errLogfmt, l.dec.Err(), lbs) + return line, true + } + + return line, true +} + +func (l *LogfmtExpressionParser) RequiredLabelNames() []string { return []string{} } + +type JSONExpressionParser struct { + ids []string + paths [][]string + keys internedStringSet +} + +func NewJSONExpressionParser(expressions []LabelExtractionExpr) (*JSONExpressionParser, error) { + var ids []string + var paths [][]string + for _, exp := range expressions { + path, err := jsonexpr.Parse(exp.Expression, false) + if err != nil { + return nil, fmt.Errorf("cannot parse expression [%s]: %w", exp.Expression, err) + } + + if !model.LabelName(exp.Identifier).IsValid() { + return nil, fmt.Errorf("invalid extracted label name '%s'", exp.Identifier) + } + + ids = append(ids, exp.Identifier) + paths = append(paths, pathsToString(path)) + } + + return &JSONExpressionParser{ + ids: ids, + paths: paths, + keys: internedStringSet{}, + }, nil +} + +func pathsToString(paths []interface{}) []string { + stingPaths := make([]string, 0, len(paths)) + for _, p := range paths { + switch v := p.(type) { + case int: + stingPaths = append(stingPaths, fmt.Sprintf("[%d]", v)) + case string: + stingPaths = append(stingPaths, v) + } + } + return stingPaths +} + +func (j *JSONExpressionParser) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + if len(line) == 0 || lbs.ParserLabelHints().NoLabels() { + return line, true + } + + // Check that the line starts correctly + // the parser will pass an error if other + // parts of the line are malformed + if !isValidJSONStart(line) { + addErrLabel(errJSON, nil, lbs) + return line, true + } + + var matches int + jsonparser.EachKey(line, func(idx int, data []byte, typ jsonparser.ValueType, err error) { + if err != nil { + addErrLabel(errJSON, err, lbs) + return + } + + identifier := j.ids[idx] + key, _ := j.keys.Get(unsafeGetBytes(identifier), func() (string, bool) { + if lbs.BaseHas(identifier) { + identifier = identifier + duplicateSuffix + } + return identifier, true + }) + + switch typ { + case jsonparser.Null: + lbs.Set(ParsedLabel, key, "") + default: + lbs.Set(ParsedLabel, key, unescapeJSONString(data)) + } + + matches++ + }, j.paths...) 
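For the expression variants, identifiers pair with jsonexpr paths. A sketch, assuming LabelExtractionExpr is the exported struct with Identifier and Expression fields implied by the constructors above:

    jp, err := log.NewJSONExpressionParser([]log.LabelExtractionExpr{
        {Identifier: "uuid", Expression: "pod.uuid"},
    })
    if err != nil {
        panic(err)
    }
    _, _ = jp.Process(0, []byte(`{"pod":{"uuid":"abc"}}`), b)
    // sets uuid="abc"; identifiers absent from the line are filled with "".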
+ + // Ensure there's a label for every value + if matches < len(j.ids) { + for _, id := range j.ids { + if _, ok := lbs.Get(id); !ok { + lbs.Set(ParsedLabel, id, "") + } + } + } + + return line, true +} + +func isValidJSONStart(data []byte) bool { + switch data[0] { + case '"', '{', '[': + return true + default: + return false + } +} + +func (j *JSONExpressionParser) RequiredLabelNames() []string { return []string{} } + +type UnpackParser struct { + lbsBuffer []string + + keys internedStringSet +} + +// NewUnpackParser creates a new unpack stage. +// The unpack stage will parse a json log line as map[string]string where each key will be translated into labels. +// A special key _entry will also be used to replace the original log line. This is to be used in conjunction with Promtail pack stage. +// see https://grafana.com/docs/loki/latest/clients/promtail/stages/pack/ +func NewUnpackParser() *UnpackParser { + return &UnpackParser{ + lbsBuffer: make([]string, 0, 16), + keys: internedStringSet{}, + } +} + +func (UnpackParser) RequiredLabelNames() []string { return []string{} } + +func (u *UnpackParser) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + if len(line) == 0 || lbs.ParserLabelHints().NoLabels() { + return line, true + } + + // we only care about object and values. + if line[0] != '{' { + addErrLabel(errJSON, errUnexpectedJSONObject, lbs) + return line, true + } + + u.lbsBuffer = u.lbsBuffer[:0] + entry, err := u.unpack(line, lbs) + if err != nil { + if errors.Is(err, errLabelDoesNotMatch) { + return entry, false + } + addErrLabel(errJSON, err, lbs) + return line, true + } + + return entry, true +} + +func addErrLabel(msg string, err error, lbs *LabelsBuilder) { + lbs.SetErr(msg) + + if err != nil { + lbs.SetErrorDetails(err.Error()) + } + + if lbs.ParserLabelHints().PreserveError() { + lbs.Set(ParsedLabel, logqlmodel.PreserveErrorLabel, "true") + } +} + +func (u *UnpackParser) unpack(entry []byte, lbs *LabelsBuilder) ([]byte, error) { + var isPacked bool + err := jsonparser.ObjectEach(entry, func(key, value []byte, typ jsonparser.ValueType, _ int) error { + switch typ { + case jsonparser.String: + if unsafeGetString(key) == logqlmodel.PackedEntryKey { + // Inlined bytes escape to save allocs + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + bU, err := jsonparser.Unescape(value, stackbuf[:]) + if err != nil { + return err + } + + entry = bU + isPacked = true + return nil + } + key, ok := u.keys.Get(key, func() (string, bool) { + field := string(key) + if lbs.BaseHas(field) { + field = field + duplicateSuffix + } + if !lbs.ParserLabelHints().ShouldExtract(field) { + return "", false + } + return field, true + }) + if !ok { + return nil + } + + // append to the buffer of labels + u.lbsBuffer = append(u.lbsBuffer, sanitizeLabelKey(key, true), unescapeJSONString(value)) + default: + return nil + } + + return nil + }) + + if err != nil { + return nil, err + } + + // flush the buffer if we found a packed entry. 
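A sketch of the unpack round-trip, assuming the key referenced by logqlmodel.PackedEntryKey is "_entry" as produced by Promtail's pack stage (the packed line is illustrative):

    up := log.NewUnpackParser()
    line, ok := up.Process(0, []byte(`{"_entry":"GET /index.html","pod":"p-1"}`), b)
    // ok == true, line becomes "GET /index.html", and pod="p-1" is promoted
    // to a label (with the _extracted suffix on stream-label collisions).
    _, _ = line, ok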
+ if isPacked { + for i := 0; i < len(u.lbsBuffer); i = i + 2 { + lbs.Set(ParsedLabel, u.lbsBuffer[i], u.lbsBuffer[i+1]) + if !lbs.ParserLabelHints().ShouldContinueParsingLine(u.lbsBuffer[i], lbs) { + return entry, errLabelDoesNotMatch + } + } + } + return entry, nil +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/parser_hints.go b/vendor/github.com/grafana/loki/pkg/logql/log/parser_hints.go new file mode 100644 index 00000000..cdb61015 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/parser_hints.go @@ -0,0 +1,205 @@ +package log + +import ( + "strings" + + "github.com/grafana/loki/pkg/logqlmodel" +) + +var noParserHints = &Hints{} + +// ParserHint are hints given to LogQL parsers. +// This is specially useful for parser that extract implicitly all possible label keys. +// This is used only within metric queries since it's rare that you need all label keys. +// For example in the following expression: +// +// sum by (status_code) (rate({app="foo"} | json [5m])) +// +// All we need to extract is the status_code in the json parser. +type ParserHint interface { + // Tells if a label with the given key should be extracted. + ShouldExtract(key string) bool + + // Tells if there's any hint that start with the given prefix. + // This allows to speed up key searching in nested structured like json. + ShouldExtractPrefix(prefix string) bool + + // Tells if we should not extract any labels. + // For example in : + // sum(rate({app="foo"} | json [5m])) + // We don't need to extract any labels from the log line. + NoLabels() bool + + // Holds state about what's already been extracted for the associated + // labels. This assumes that only required labels are ever extracted + RecordExtracted(string) + + // Returns true if all the required labels have already been extracted + AllRequiredExtracted() bool + + // Resets the state of extracted labels + Reset() + + // PreserveError returns true when parsing errors were specifically requested + PreserveError() bool + + // ShouldContinueParsingLine returns true when there is no label matcher for the + // provided label or the passed label and value match what's in the pipeline + ShouldContinueParsingLine(labelName string, lbs *LabelsBuilder) bool +} + +type Hints struct { + noLabels bool + requiredLabels []string + shouldPreserveError bool + extracted []string + labelFilters []LabelFilterer + labelNames []string +} + +func (p *Hints) ShouldExtract(key string) bool { + if len(p.requiredLabels) == 0 { + return true + } + + for _, l := range p.extracted { + if l == key { + return false + } + } + + for _, l := range p.requiredLabels { + if l == key { + return true + } + } + + return false +} + +func (p *Hints) ShouldExtractPrefix(prefix string) bool { + if len(p.requiredLabels) == 0 { + return true + } + for _, l := range p.requiredLabels { + if strings.HasPrefix(l, prefix) { + return true + } + } + + return false +} + +func (p *Hints) NoLabels() bool { + return p.noLabels || p.AllRequiredExtracted() +} + +func (p *Hints) RecordExtracted(key string) { + for _, l := range p.requiredLabels { + if l == key { + p.extracted = append(p.extracted, key) + return + } + } +} + +func (p *Hints) AllRequiredExtracted() bool { + if len(p.requiredLabels) == 0 { + return false + } + return len(p.extracted) == len(p.requiredLabels) +} + +func (p *Hints) Reset() { + p.extracted = p.extracted[:0] +} + +func (p *Hints) PreserveError() bool { + return p.shouldPreserveError +} + +func (p *Hints) ShouldContinueParsingLine(labelName string, lbs 
*LabelsBuilder) bool { + for i := 0; i < len(p.labelNames); i++ { + if p.labelNames[i] == labelName { + _, matches := p.labelFilters[i].Process(0, nil, lbs) + return matches + } + } + return true +} + +// NewParserHint creates a new parser hint using the list of labels that are seen and required in a query. +func NewParserHint(requiredLabelNames, groups []string, without, noLabels bool, metricLabelName string, stages []Stage) *Hints { + hints := make([]string, 0, 2*(len(requiredLabelNames)+len(groups)+1)) + hints = appendLabelHints(hints, requiredLabelNames...) + hints = appendLabelHints(hints, groups...) + hints = appendLabelHints(hints, metricLabelName) + hints = uniqueString(hints) + + // Save the names next to the filters to avoid an alloc when f.RequiredLabelNames() is called + var labelNames []string + var labelFilters []LabelFilterer + for _, s := range stages { + switch f := s.(type) { + case *BinaryLabelFilter: + // TODO: as long as each leg of the binary filter operates on the same (and only 1) label, + // we should be able to add this to our filters + continue + case LabelFilterer: + if len(f.RequiredLabelNames()) > 1 { + // Hints can only operate on one label at a time + continue + } + labelFilters = append(labelFilters, f) + labelNames = append(labelNames, f.RequiredLabelNames()...) + } + } + + extracted := make([]string, 0, len(hints)) + if noLabels { + if len(hints) > 0 { + return &Hints{requiredLabels: hints, extracted: extracted, shouldPreserveError: containsError(hints), labelFilters: labelFilters, labelNames: labelNames} + } + return &Hints{noLabels: true} + } + + ph := &Hints{labelFilters: labelFilters, labelNames: labelNames} + + // we don't know what is required when a without clause is used. + // Same is true when there's no grouping. + // no hints available then. + if without || len(groups) == 0 { + return ph + } + + ph.requiredLabels = hints + ph.shouldPreserveError = containsError(hints) + + return &Hints{requiredLabels: hints, extracted: extracted, shouldPreserveError: containsError(hints)} +} + +func containsError(hints []string) bool { + for _, s := range hints { + if s == logqlmodel.ErrorLabel { + return true + } + } + return false +} + +// appendLabelHints Appends the label to the list of hints with and without the duplicate suffix. +// If a parsed label collides with a stream label we add the `_extracted` suffix to it, however hints +// are used by the parsers before we know they will collide with a stream label and hence before the +// _extracted suffix is added. Therefore we must strip the _extracted suffix from any required labels +// that were parsed from somewhere in the query, say in a filter or an aggregation clause. +// Because it's possible for a valid json or logfmt key to already end with _extracted, we'll just +// leave the existing entry ending with _extracted but also add a version with the suffix removed. 
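A sketch of the hints for a query like sum by (status) (rate({app="foo"} | json [5m])): only the grouped label needs extraction, so parsers can skip and short-circuit everything else (call shape taken from NewParserHint above; fragment, imports elided):

    h := log.NewParserHint(nil, []string{"status"}, false, false, "", nil)
    fmt.Println(h.ShouldExtract("status")) // true
    fmt.Println(h.ShouldExtract("pod"))    // false: not needed by the query
    h.RecordExtracted("status")
    fmt.Println(h.AllRequiredExtracted())  // true: parsing can stop early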
+func appendLabelHints(dst []string, src ...string) []string { + for _, l := range src { + dst = append(dst, l) + if strings.HasSuffix(l, duplicateSuffix) { + dst = append(dst, strings.TrimSuffix(l, duplicateSuffix)) + } + } + return dst +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/pattern/ast.go b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/ast.go new file mode 100644 index 00000000..b4cf8e81 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/ast.go @@ -0,0 +1,86 @@ +package pattern + +import ( + "fmt" + "unicode/utf8" +) + +type node interface { + fmt.Stringer +} + +type expr []node + +func (e expr) hasCapture() bool { + return e.captureCount() != 0 +} + +func (e expr) validate() error { + if !e.hasCapture() { + return ErrNoCapture + } + // Consecutive captures are not allowed. + for i, n := range e { + if i+1 >= len(e) { + break + } + if _, ok := n.(capture); ok { + if _, ok := e[i+1].(capture); ok { + return fmt.Errorf("found consecutive capture '%s': %w", n.String()+e[i+1].String(), ErrInvalidExpr) + } + } + } + + caps := e.captures() + uniq := map[string]struct{}{} + for _, c := range caps { + if _, ok := uniq[c]; ok { + return fmt.Errorf("duplicate capture name (%s): %w", c, ErrInvalidExpr) + } + uniq[c] = struct{}{} + } + return nil +} + +func (e expr) captures() (captures []string) { + for _, n := range e { + if c, ok := n.(capture); ok && !c.isUnamed() { + captures = append(captures, c.Name()) + } + } + return +} + +func (e expr) captureCount() (count int) { + return len(e.captures()) +} + +type capture string + +func (c capture) String() string { + return "<" + string(c) + ">" +} + +func (c capture) Name() string { + return string(c) +} + +func (c capture) isUnamed() bool { + return string(c) == underscore +} + +type literals []byte + +func (l literals) String() string { + return string(l) +} + +func runesToLiterals(rs []rune) literals { + res := make([]byte, len(rs)*utf8.UTFMax) + count := 0 + for _, r := range rs { + count += utf8.EncodeRune(res[count:], r) + } + res = res[:count] + return res +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/pattern/expr.y b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/expr.y new file mode 100644 index 00000000..24729a3c --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/expr.y @@ -0,0 +1,45 @@ +%{ + +package pattern + +%} + +%union{ + Expr []node + Node node + + literal rune + Literals []rune + str string + token int +} + +%start root + +%type expr +%type node +%type literals + +%token IDENTIFIER +%token LITERAL +%token LESS_THAN MORE_THAN UNDERSCORE + +%% + +root: + expr { exprlex.(*lexer).expr = $1 }; + +expr: + node { $$ = []node{$1} } + | expr node { $$ = append($1, $2) } + ; + +node: + IDENTIFIER { $$ = capture($1) } + | literals { $$ = runesToLiterals($1) } + ; + +literals: + LITERAL { $$ = []rune{$1} } + | literals LITERAL { $$ = append($1, $2) } +%% diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/pattern/expr.y.go b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/expr.y.go new file mode 100644 index 00000000..d9d67d2b --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/expr.y.go @@ -0,0 +1,462 @@ +// Code generated by goyacc -p expr -o pkg/logql/log/pattern/expr.y.go pkg/logql/log/pattern/expr.y. DO NOT EDIT. 
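The grammar above compiles into the matcher used by the pattern stage; a standalone sketch (import path as vendored in this diff):

    package main

    import (
        "fmt"

        "github.com/grafana/loki/pkg/logql/log/pattern"
    )

    func main() {
        m, err := pattern.New("<ip> - <_>")
        if err != nil {
            panic(err) // ErrNoCapture, consecutive or duplicate captures, ...
        }
        fmt.Println(m.Names())                               // [ip]
        fmt.Printf("%q\n", m.Matches([]byte("1.2.3.4 - x"))) // ["1.2.3.4"]
    }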
+ +package pattern + +import __yyfmt__ "fmt" + +type exprSymType struct { + yys int + Expr []node + Node node + + literal rune + Literals []rune + str string + token int +} + +const IDENTIFIER = 57346 +const LITERAL = 57347 +const LESS_THAN = 57348 +const MORE_THAN = 57349 +const UNDERSCORE = 57350 + +var exprToknames = [...]string{ + "$end", + "error", + "$unk", + "IDENTIFIER", + "LITERAL", + "LESS_THAN", + "MORE_THAN", + "UNDERSCORE", +} +var exprStatenames = [...]string{} + +const exprEofCode = 1 +const exprErrCode = 2 +const exprInitialStackSize = 16 + +var exprExca = [...]int{ + -1, 1, + 1, -1, + -2, 0, +} + +const exprPrivate = 57344 + +const exprLast = 8 + +var exprAct = [...]int{ + + 4, 6, 8, 3, 5, 2, 7, 1, +} +var exprPact = [...]int{ + + -4, -1000, -4, -1000, -1000, -3, -1000, -1000, -1000, +} +var exprPgo = [...]int{ + + 0, 7, 5, 3, 4, +} +var exprR1 = [...]int{ + + 0, 1, 2, 2, 3, 3, 4, 4, +} +var exprR2 = [...]int{ + + 0, 1, 1, 2, 1, 1, 1, 2, +} +var exprChk = [...]int{ + + -1000, -1, -2, -3, 4, -4, 5, -3, 5, +} +var exprDef = [...]int{ + + 0, -2, 1, 2, 4, 5, 6, 3, 7, +} +var exprTok1 = [...]int{ + + 1, +} +var exprTok2 = [...]int{ + + 2, 3, 4, 5, 6, 7, 8, +} +var exprTok3 = [...]int{ + 0, +} + +var exprErrorMessages = [...]struct { + state int + token int + msg string +}{} + +/* parser for yacc output */ + +var ( + exprDebug = 0 + exprErrorVerbose = false +) + +type exprLexer interface { + Lex(lval *exprSymType) int + Error(s string) +} + +type exprParser interface { + Parse(exprLexer) int + Lookahead() int +} + +type exprParserImpl struct { + lval exprSymType + stack [exprInitialStackSize]exprSymType + char int +} + +func (p *exprParserImpl) Lookahead() int { + return p.char +} + +func exprNewParser() exprParser { + return &exprParserImpl{} +} + +const exprFlag = -1000 + +func exprTokname(c int) string { + if c >= 1 && c-1 < len(exprToknames) { + if exprToknames[c-1] != "" { + return exprToknames[c-1] + } + } + return __yyfmt__.Sprintf("tok-%v", c) +} + +func exprStatname(s int) string { + if s >= 0 && s < len(exprStatenames) { + if exprStatenames[s] != "" { + return exprStatenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func exprErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !exprErrorVerbose { + return "syntax error" + } + + for _, e := range exprErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + exprTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. + base := exprPact[state] + for tok := TOKSTART; tok-1 < len(exprToknames); tok++ { + if n := base + tok; n >= 0 && n < exprLast && exprChk[exprAct[n]] == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if exprDef[state] == -2 { + i := 0 + for exprExca[i] != -1 || exprExca[i+1] != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; exprExca[i] >= 0; i += 2 { + tok := exprExca[i] + if tok < TOKSTART || exprExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. 
+ if exprExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += exprTokname(tok) + } + return res +} + +func exprlex1(lex exprLexer, lval *exprSymType) (char, token int) { + token = 0 + char = lex.Lex(lval) + if char <= 0 { + token = exprTok1[0] + goto out + } + if char < len(exprTok1) { + token = exprTok1[char] + goto out + } + if char >= exprPrivate { + if char < exprPrivate+len(exprTok2) { + token = exprTok2[char-exprPrivate] + goto out + } + } + for i := 0; i < len(exprTok3); i += 2 { + token = exprTok3[i+0] + if token == char { + token = exprTok3[i+1] + goto out + } + } + +out: + if token == 0 { + token = exprTok2[1] /* unknown char */ + } + if exprDebug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", exprTokname(token), uint(char)) + } + return char, token +} + +func exprParse(exprlex exprLexer) int { + return exprNewParser().Parse(exprlex) +} + +func (exprrcvr *exprParserImpl) Parse(exprlex exprLexer) int { + var exprn int + var exprVAL exprSymType + var exprDollar []exprSymType + _ = exprDollar // silence set and not used + exprS := exprrcvr.stack[:] + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + exprstate := 0 + exprrcvr.char = -1 + exprtoken := -1 // exprrcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when not parsing. + exprstate = -1 + exprrcvr.char = -1 + exprtoken = -1 + }() + exprp := -1 + goto exprstack + +ret0: + return 0 + +ret1: + return 1 + +exprstack: + /* put a state and value onto the stack */ + if exprDebug >= 4 { + __yyfmt__.Printf("char %v in %v\n", exprTokname(exprtoken), exprStatname(exprstate)) + } + + exprp++ + if exprp >= len(exprS) { + nyys := make([]exprSymType, len(exprS)*2) + copy(nyys, exprS) + exprS = nyys + } + exprS[exprp] = exprVAL + exprS[exprp].yys = exprstate + +exprnewstate: + exprn = exprPact[exprstate] + if exprn <= exprFlag { + goto exprdefault /* simple state */ + } + if exprrcvr.char < 0 { + exprrcvr.char, exprtoken = exprlex1(exprlex, &exprrcvr.lval) + } + exprn += exprtoken + if exprn < 0 || exprn >= exprLast { + goto exprdefault + } + exprn = exprAct[exprn] + if exprChk[exprn] == exprtoken { /* valid shift */ + exprrcvr.char = -1 + exprtoken = -1 + exprVAL = exprrcvr.lval + exprstate = exprn + if Errflag > 0 { + Errflag-- + } + goto exprstack + } + +exprdefault: + /* default state action */ + exprn = exprDef[exprstate] + if exprn == -2 { + if exprrcvr.char < 0 { + exprrcvr.char, exprtoken = exprlex1(exprlex, &exprrcvr.lval) + } + + /* look through exception table */ + xi := 0 + for { + if exprExca[xi+0] == -1 && exprExca[xi+1] == exprstate { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + exprn = exprExca[xi+0] + if exprn < 0 || exprn == exprtoken { + break + } + } + exprn = exprExca[xi+1] + if exprn < 0 { + goto ret0 + } + } + if exprn == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + exprlex.Error(exprErrorMessage(exprstate, exprtoken)) + Nerrs++ + if exprDebug >= 1 { + __yyfmt__.Printf("%s", exprStatname(exprstate)) + __yyfmt__.Printf(" saw %s\n", exprTokname(exprtoken)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... 
try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for exprp >= 0 { + exprn = exprPact[exprS[exprp].yys] + exprErrCode + if exprn >= 0 && exprn < exprLast { + exprstate = exprAct[exprn] /* simulate a shift of "error" */ + if exprChk[exprstate] == exprErrCode { + goto exprstack + } + } + + /* the current p has no shift on "error", pop stack */ + if exprDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", exprS[exprp].yys) + } + exprp-- + } + /* there is no state on the stack with an error shift ... abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if exprDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", exprTokname(exprtoken)) + } + if exprtoken == exprEofCode { + goto ret1 + } + exprrcvr.char = -1 + exprtoken = -1 + goto exprnewstate /* try again in the same state */ + } + } + + /* reduction by production exprn */ + if exprDebug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", exprn, exprStatname(exprstate)) + } + + exprnt := exprn + exprpt := exprp + _ = exprpt // guard against "declared and not used" + + exprp -= exprR2[exprn] + // exprp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. + if exprp+1 >= len(exprS) { + nyys := make([]exprSymType, len(exprS)*2) + copy(nyys, exprS) + exprS = nyys + } + exprVAL = exprS[exprp+1] + + /* consult goto table to find next state */ + exprn = exprR1[exprn] + exprg := exprPgo[exprn] + exprj := exprg + exprS[exprp].yys + 1 + + if exprj >= exprLast { + exprstate = exprAct[exprg] + } else { + exprstate = exprAct[exprj] + if exprChk[exprstate] != -exprn { + exprstate = exprAct[exprg] + } + } + // dummy call; replaced with literal code + switch exprnt { + + case 1: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprlex.(*lexer).expr = exprDollar[1].Expr + } + case 2: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.Expr = []node{exprDollar[1].Node} + } + case 3: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.Expr = append(exprDollar[1].Expr, exprDollar[2].Node) + } + case 4: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.Node = capture(exprDollar[1].str) + } + case 5: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.Node = runesToLiterals(exprDollar[1].Literals) + } + case 6: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.Literals = []rune{exprDollar[1].literal} + } + case 7: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.Literals = append(exprDollar[1].Literals, exprDollar[2].literal) + } + } + goto exprstack /* stack new state and value */ +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/pattern/lexer.go b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/lexer.go new file mode 100644 index 00000000..2f867f93 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/lexer.go @@ -0,0 +1,62 @@ +package pattern + +type lexer struct { + data []byte + p, pe, cs int + ts, te, act int + + lastnewline int + curline int + + errs []parseError + expr []node +} + +func newLexer() *lexer { + lex := &lexer{} + lex.init() + return lex +} + +func (lex *lexer) setData(data []byte) { + lex.data = data + lex.pe = len(data) + lex.lastnewline = -1 + lex.curline = 1 +} + +// Error implements exprLexer interface generated by yacc (yyLexer) +func (lex *lexer) Error(e string) { + lex.errs = append(lex.errs, newParseError(e, lex.curline, lex.curcol())) +} + +// curcol calculates the current token's start column based on the last newline 
position +// returns a 1-indexed value +func (lex *lexer) curcol() int { + return (lex.ts + 1 /* 1-indexed columns */) - (lex.lastnewline + 1 /* next after newline */) +} + +func (lex *lexer) handle(token int, err error) int { + if err != nil { + lex.Error(err.Error()) + return LEXER_ERROR + } + return token +} + +func (lex *lexer) token() string { + return string(lex.data[lex.ts:lex.te]) +} + +// nolint +func (lex *lexer) identifier(out *exprSymType) (int, error) { + t := lex.token() + out.str = t[1 : len(t)-1] + return IDENTIFIER, nil +} + +// nolint +func (lex *lexer) literal(out *exprSymType) (int, error) { + out.literal = rune(lex.data[lex.ts]) + return LITERAL, nil +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/pattern/lexer.rl b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/lexer.rl new file mode 100644 index 00000000..7b1d2546 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/lexer.rl @@ -0,0 +1,43 @@ +package pattern + +%%{ + machine pattern; + write data; + access lex.; + variable p lex.p; + variable pe lex.pe; + prepush { + if len(lex.stack) <= lex.top { + lex.stack = append(lex.stack, 0) + } + } +}%% + +const LEXER_ERROR = 0 + +%%{ + identifier = '<' (alpha| '_') (alnum | '_' )* '>'; + literal = any; +}%% + +func (lex *lexer) Lex(out *exprSymType) int { + eof := lex.pe + tok := 0 + + %%{ + + main := |* + identifier => { tok = lex.handle(lex.identifier(out)); fbreak; }; + literal => { tok = lex.handle(lex.literal(out)); fbreak; }; + *|; + + write exec; + }%% + + return tok; +} + + +func (lex *lexer) init() { + %% write init; +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/pattern/lexer.rl.go b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/lexer.rl.go new file mode 100644 index 00000000..4e3b3e18 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/lexer.rl.go @@ -0,0 +1,251 @@ +//line pkg/logql/log/pattern/lexer.rl:1 +package pattern + +//line pkg/logql/log/pattern/lexer.rl.go:7 +var _pattern_actions []byte = []byte{ + 0, 1, 0, 1, 1, 1, 2, 1, 3, + 1, 4, 1, 5, 1, 6, +} + +var _pattern_key_offsets []byte = []byte{ + 0, 8, 9, +} + +var _pattern_trans_keys []byte = []byte{ + 62, 95, 48, 57, 65, 90, 97, 122, + 60, 95, 65, 90, 97, 122, +} + +var _pattern_single_lengths []byte = []byte{ + 2, 1, 1, +} + +var _pattern_range_lengths []byte = []byte{ + 3, 0, 2, +} + +var _pattern_index_offsets []byte = []byte{ + 0, 6, 8, +} + +var _pattern_trans_targs []byte = []byte{ + 1, 0, 0, 0, 0, 1, 2, 1, + 0, 0, 0, 1, 1, 1, +} + +var _pattern_trans_actions []byte = []byte{ + 7, 0, 0, 0, 0, 13, 5, 9, + 0, 0, 0, 11, 13, 11, +} + +var _pattern_to_state_actions []byte = []byte{ + 0, 1, 0, +} + +var _pattern_from_state_actions []byte = []byte{ + 0, 3, 0, +} + +var _pattern_eof_trans []byte = []byte{ + 13, 0, 14, +} + +const pattern_start int = 1 +const pattern_first_final int = 1 +const pattern_error int = -1 + +const pattern_en_main int = 1 + +//line pkg/logql/log/pattern/lexer.rl:14 + +const LEXER_ERROR = 0 + +//line pkg/logql/log/pattern/lexer.rl:21 + +func (lex *lexer) Lex(out *exprSymType) int { + eof := lex.pe + tok := 0 + +//line pkg/logql/log/pattern/lexer.rl.go:77 + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if (lex.p) == (lex.pe) { + goto _test_eof + } + _resume: + _acts = int(_pattern_from_state_actions[lex.cs]) + _nacts = uint(_pattern_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _pattern_actions[_acts-1] { + case 1: +//line NONE:1 + 
lex.ts = (lex.p) + +//line pkg/logql/log/pattern/lexer.rl.go:97 + } + } + + _keys = int(_pattern_key_offsets[lex.cs]) + _trans = int(_pattern_index_offsets[lex.cs]) + + _klen = int(_pattern_single_lengths[lex.cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case lex.data[(lex.p)] < _pattern_trans_keys[_mid]: + _upper = _mid - 1 + case lex.data[(lex.p)] > _pattern_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_pattern_range_lengths[lex.cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case lex.data[(lex.p)] < _pattern_trans_keys[_mid]: + _upper = _mid - 2 + case lex.data[(lex.p)] > _pattern_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _eof_trans: + lex.cs = int(_pattern_trans_targs[_trans]) + + if _pattern_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_pattern_trans_actions[_trans]) + _nacts = uint(_pattern_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _pattern_actions[_acts-1] { + case 2: +//line NONE:1 + lex.te = (lex.p) + 1 + + case 3: +//line pkg/logql/log/pattern/lexer.rl:30 + lex.te = (lex.p) + 1 + { + tok = lex.handle(lex.identifier(out)) + (lex.p)++ + goto _out + } + case 4: +//line pkg/logql/log/pattern/lexer.rl:31 + lex.te = (lex.p) + 1 + { + tok = lex.handle(lex.literal(out)) + (lex.p)++ + goto _out + } + case 5: +//line pkg/logql/log/pattern/lexer.rl:31 + lex.te = (lex.p) + (lex.p)-- + { + tok = lex.handle(lex.literal(out)) + (lex.p)++ + goto _out + } + case 6: +//line pkg/logql/log/pattern/lexer.rl:31 + (lex.p) = (lex.te) - 1 + { + tok = lex.handle(lex.literal(out)) + (lex.p)++ + goto _out + } +//line pkg/logql/log/pattern/lexer.rl.go:191 + } + } + + _again: + _acts = int(_pattern_to_state_actions[lex.cs]) + _nacts = uint(_pattern_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _pattern_actions[_acts-1] { + case 0: +//line NONE:1 + lex.ts = 0 + +//line pkg/logql/log/pattern/lexer.rl.go:205 + } + } + + (lex.p)++ + if (lex.p) != (lex.pe) { + goto _resume + } + _test_eof: + { + } + if (lex.p) == eof { + if _pattern_eof_trans[lex.cs] > 0 { + _trans = int(_pattern_eof_trans[lex.cs] - 1) + goto _eof_trans + } + } + + _out: + { + } + } + +//line pkg/logql/log/pattern/lexer.rl:35 + + return tok +} + +func (lex *lexer) init() { + +//line pkg/logql/log/pattern/lexer.rl.go:233 + { + lex.cs = pattern_start + lex.ts = 0 + lex.te = 0 + lex.act = 0 + } + +//line pkg/logql/log/pattern/lexer.rl:43 +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/pattern/parser.go b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/parser.go new file mode 100644 index 00000000..d1bc2515 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/parser.go @@ -0,0 +1,50 @@ +package pattern + +import "fmt" + +const underscore = "_" + +var tokens = map[int]string{ + LESS_THAN: "<", + MORE_THAN: ">", + UNDERSCORE: underscore, +} + +func init() { + // Improve the error messages coming out of yacc. 
+ exprErrorVerbose = true + for tok, str := range tokens { + exprToknames[tok-exprPrivate+1] = str + } +} + +func parseExpr(input string) (expr, error) { + l := newLexer() + l.setData([]byte(input)) + e := exprNewParser().Parse(l) + if e != 0 || len(l.errs) > 0 { + return nil, l.errs[0] + } + return l.expr, nil +} + +// parseError is what is returned when we failed to parse. +type parseError struct { + msg string + line, col int +} + +func (p parseError) Error() string { + if p.col == 0 && p.line == 0 { + return p.msg + } + return fmt.Sprintf("parse error at line %d, col %d: %s", p.line, p.col, p.msg) +} + +func newParseError(msg string, line, col int) parseError { + return parseError{ + msg: msg, + line: line, + col: col, + } +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/pattern/pattern.go b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/pattern.go new file mode 100644 index 00000000..b08c91b6 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/pattern/pattern.go @@ -0,0 +1,95 @@ +package pattern + +import ( + "bytes" + "errors" +) + +var ( + ErrNoCapture = errors.New("at least one capture is required") + ErrInvalidExpr = errors.New("invalid expression") +) + +type Matcher interface { + Matches(in []byte) [][]byte + Names() []string +} + +type matcher struct { + e expr + + captures [][]byte + names []string +} + +func New(in string) (Matcher, error) { + e, err := parseExpr(in) + if err != nil { + return nil, err + } + if err := e.validate(); err != nil { + return nil, err + } + return &matcher{ + e: e, + captures: make([][]byte, 0, e.captureCount()), + names: e.captures(), + }, nil +} + +// Matches matches the given line with the provided pattern. +// Matches invalidates the previous returned captures array. +func (m *matcher) Matches(in []byte) [][]byte { + if len(in) == 0 { + return nil + } + if len(m.e) == 0 { + return nil + } + captures := m.captures[:0] + expr := m.e + if ls, ok := expr[0].(literals); ok { + i := bytes.Index(in, ls) + if i != 0 { + return nil + } + in = in[len(ls):] + expr = expr[1:] + } + if len(expr) == 0 { + return nil + } + // from now we have capture - literals - capture ... (literals)? + for len(expr) != 0 { + if len(expr) == 1 { // we're ending on a capture. + if !(expr[0].(capture)).isUnamed() { + captures = append(captures, in) + } + return captures + } + capt := expr[0].(capture) + ls := expr[1].(literals) + expr = expr[2:] + i := bytes.Index(in, ls) + if i == -1 { + // if a capture is missed we return up to the end as the capture. + if !capt.isUnamed() { + captures = append(captures, in) + } + return captures + } + + if capt.isUnamed() { + in = in[len(ls)+i:] + continue + } + captures = append(captures, in[:i]) + in = in[len(ls)+i:] + } + + return captures +} + +func (m *matcher) Names() []string { + return m.names +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/pipeline.go b/vendor/github.com/grafana/loki/pkg/logql/log/pipeline.go new file mode 100644 index 00000000..31665e7b --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/pipeline.go @@ -0,0 +1,363 @@ +package log + +import ( + "reflect" + "sync" + "unsafe" + + "github.com/prometheus/prometheus/model/labels" +) + +// NoopStage is a stage that doesn't process a log line. +var NoopStage Stage = &noopStage{} + +// Pipeline can create pipelines for each log stream. +type Pipeline interface { + ForStream(labels labels.Labels) StreamPipeline + Reset() +} + +// StreamPipeline transform and filter log lines and labels. 
+// A StreamPipeline never mutates the received line.
+type StreamPipeline interface {
+ BaseLabels() LabelsResult
+ // Process processes a log line and returns the transformed line and the labels.
+ // The buffer returned for the log line can be reused on subsequent calls to Process and therefore must be copied.
+ Process(ts int64, line []byte, structuredMetadata ...labels.Label) (resultLine []byte, resultLabels LabelsResult, matches bool)
+ ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (resultLine string, resultLabels LabelsResult, matches bool)
+}
+
+// Stage is a single step of a Pipeline.
+// A Stage implementation should never mutate the line passed, but instead either
+// return the line unchanged or allocate a new line.
+type Stage interface {
+ Process(ts int64, line []byte, lbs *LabelsBuilder) ([]byte, bool)
+ RequiredLabelNames() []string
+}
+
+// NewNoopPipeline creates a pipeline that does not process anything and returns log streams as is.
+func NewNoopPipeline() Pipeline {
+ return &noopPipeline{
+ cache: map[uint64]*noopStreamPipeline{},
+ baseBuilder: NewBaseLabelsBuilder(),
+ }
+}
+
+type noopPipeline struct {
+ cache map[uint64]*noopStreamPipeline
+ baseBuilder *BaseLabelsBuilder
+ mu sync.RWMutex
+}
+
+func (n *noopPipeline) ForStream(labels labels.Labels) StreamPipeline {
+ h := n.baseBuilder.Hash(labels)
+
+ n.mu.RLock()
+ if cached, ok := n.cache[h]; ok {
+ n.mu.RUnlock()
+ return cached
+ }
+ n.mu.RUnlock()
+
+ sp := &noopStreamPipeline{n.baseBuilder.ForLabels(labels, h)}
+
+ n.mu.Lock()
+ defer n.mu.Unlock()
+
+ n.cache[h] = sp
+ return sp
+}
+
+func (n *noopPipeline) Reset() {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+
+ for k := range n.cache {
+ delete(n.cache, k)
+ }
+}
+
+// IsNoopPipeline tells if a pipeline is a Noop.
+func IsNoopPipeline(p Pipeline) bool {
+ _, ok := p.(*noopPipeline)
+ return ok
+}
+
+type noopStreamPipeline struct {
+ builder *LabelsBuilder
+}
+
+func (n noopStreamPipeline) Process(_ int64, line []byte, structuredMetadata ...labels.Label) ([]byte, LabelsResult, bool) {
+ n.builder.Reset()
+ n.builder.Add(StructuredMetadataLabel, structuredMetadata...)
+ return line, n.builder.LabelsResult(), true
+}
+
+func (n noopStreamPipeline) ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (string, LabelsResult, bool) {
+ _, lr, ok := n.Process(ts, unsafeGetBytes(line), structuredMetadata...)
+ return line, lr, ok
+}
+
+func (n noopStreamPipeline) BaseLabels() LabelsResult { return n.builder.currentResult }
+
+type noopStage struct{}
+
+func (noopStage) Process(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) {
+ return line, true
+}
+func (noopStage) RequiredLabelNames() []string { return []string{} }
+
+type StageFunc struct {
+ process func(ts int64, line []byte, lbs *LabelsBuilder) ([]byte, bool)
+ requiredLabels []string
+}
+
+func (fn StageFunc) Process(ts int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) {
+ return fn.process(ts, line, lbs)
+}
+
+func (fn StageFunc) RequiredLabelNames() []string {
+ if fn.requiredLabels == nil {
+ return []string{}
+ }
+ return fn.requiredLabels
+}
+
+// pipeline is a combination of multiple stages.
+// It can also be reduced into a single stage for convenience.
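Putting the pieces together, a per-stream pipeline sketch (stage choice, labels, and line illustrative; fragment, imports as in the earlier sketches):

    p := log.NewPipeline([]log.Stage{log.NewLogfmtParser(false, false)})
    sp := p.ForStream(labels.FromStrings("app", "foo"))
    line, res, ok := sp.Process(0, []byte(`level=error msg=boom`))
    // ok == true and res now carries level/msg as parsed labels; copy line
    // if it must outlive the next Process call, per the contract above.
    _, _, _ = line, res, ok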
+type pipeline struct { + AnalyzablePipeline + stages []Stage + baseBuilder *BaseLabelsBuilder + mu sync.RWMutex + + streamPipelines map[uint64]StreamPipeline +} + +func (p *pipeline) Stages() []Stage { + return p.stages +} + +func (p *pipeline) LabelsBuilder() *BaseLabelsBuilder { + return p.baseBuilder +} + +type AnalyzablePipeline interface { + Pipeline + Stages() []Stage + LabelsBuilder() *BaseLabelsBuilder +} + +// NewPipeline creates a new pipeline for a given set of stages. +func NewPipeline(stages []Stage) Pipeline { + if len(stages) == 0 { + return NewNoopPipeline() + } + + hints := NewParserHint(nil, nil, false, false, "", stages) + builder := NewBaseLabelsBuilderWithGrouping(nil, hints, false, false) + return &pipeline{ + stages: stages, + baseBuilder: builder, + streamPipelines: make(map[uint64]StreamPipeline), + } +} + +type streamPipeline struct { + stages []Stage + builder *LabelsBuilder +} + +func NewStreamPipeline(stages []Stage, labelsBuilder *LabelsBuilder) StreamPipeline { + return &streamPipeline{stages, labelsBuilder} +} + +func (p *pipeline) ForStream(labels labels.Labels) StreamPipeline { + hash := p.baseBuilder.Hash(labels) + + p.mu.RLock() + if res, ok := p.streamPipelines[hash]; ok { + p.mu.RUnlock() + return res + } + p.mu.RUnlock() + + res := NewStreamPipeline(p.stages, p.baseBuilder.ForLabels(labels, hash)) + + p.mu.Lock() + defer p.mu.Unlock() + + p.streamPipelines[hash] = res + return res +} + +func (p *pipeline) Reset() { + p.mu.Lock() + defer p.mu.Unlock() + + p.baseBuilder.Reset() + for k := range p.streamPipelines { + delete(p.streamPipelines, k) + } +} + +func (p *streamPipeline) Process(ts int64, line []byte, structuredMetadata ...labels.Label) ([]byte, LabelsResult, bool) { + var ok bool + p.builder.Reset() + p.builder.Add(StructuredMetadataLabel, structuredMetadata...) + + for _, s := range p.stages { + line, ok = s.Process(ts, line, p.builder) + if !ok { + return nil, nil, false + } + } + return line, p.builder.LabelsResult(), true +} + +func (p *streamPipeline) ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (string, LabelsResult, bool) { + // Stages only read from the line. + lb, lr, ok := p.Process(ts, unsafeGetBytes(line), structuredMetadata...) + // but the returned line needs to be copied. + return string(lb), lr, ok +} + +func (p *streamPipeline) BaseLabels() LabelsResult { return p.builder.currentResult } + +// PipelineFilter contains a set of matchers and a pipeline that, when matched, +// causes an entry from a log stream to be skipped. Matching entries must also +// fall between 'start' and 'end', inclusive +type PipelineFilter struct { + Start int64 + End int64 + Matchers []*labels.Matcher + Pipeline Pipeline +} + +// NewFilteringPipeline creates a pipeline where entries from the underlying +// log stream are filtered by pipeline filters before being passed to the +// pipeline representing the queried data. 
Filters are always upstream of the +// pipeline +func NewFilteringPipeline(f []PipelineFilter, p Pipeline) Pipeline { + return &filteringPipeline{ + filters: f, + pipeline: p, + } +} + +type filteringPipeline struct { + filters []PipelineFilter + pipeline Pipeline +} + +func (p *filteringPipeline) ForStream(labels labels.Labels) StreamPipeline { + var streamFilters []streamFilter + for _, f := range p.filters { + if allMatch(f.Matchers, labels) { + streamFilters = append(streamFilters, streamFilter{ + start: f.Start, + end: f.End, + pipeline: f.Pipeline.ForStream(labels), + }) + } + } + + return &filteringStreamPipeline{ + filters: streamFilters, + pipeline: p.pipeline.ForStream(labels), + } +} + +func (p *filteringPipeline) Reset() { + p.pipeline.Reset() +} + +func allMatch(matchers []*labels.Matcher, labels labels.Labels) bool { + for _, m := range matchers { + if !m.Matches(labels.Get(m.Name)) { + return false + } + } + return true +} + +type streamFilter struct { + start int64 + end int64 + pipeline StreamPipeline +} + +type filteringStreamPipeline struct { + filters []streamFilter + pipeline StreamPipeline +} + +func (sp *filteringStreamPipeline) BaseLabels() LabelsResult { + return sp.pipeline.BaseLabels() +} + +func (sp *filteringStreamPipeline) Process(ts int64, line []byte, structuredMetadata ...labels.Label) ([]byte, LabelsResult, bool) { + for _, filter := range sp.filters { + if ts < filter.start || ts > filter.end { + continue + } + + _, _, matches := filter.pipeline.Process(ts, line, structuredMetadata...) + if matches { // When the filter matches, don't run the next step + return nil, nil, false + } + } + + return sp.pipeline.Process(ts, line, structuredMetadata...) +} + +func (sp *filteringStreamPipeline) ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (string, LabelsResult, bool) { + for _, filter := range sp.filters { + if ts < filter.start || ts > filter.end { + continue + } + + _, _, matches := filter.pipeline.ProcessString(ts, line, structuredMetadata...) + if matches { // When the filter matches, don't run the next step + return "", nil, false + } + } + + return sp.pipeline.ProcessString(ts, line, structuredMetadata...) +} + +// ReduceStages reduces multiple stages into one. +func ReduceStages(stages []Stage) Stage { + if len(stages) == 0 { + return NoopStage + } + var requiredLabelNames []string + for _, s := range stages { + requiredLabelNames = append(requiredLabelNames, s.RequiredLabelNames()...) 
+ } + return StageFunc{ + process: func(ts int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + var ok bool + for _, p := range stages { + line, ok = p.Process(ts, line, lbs) + if !ok { + return nil, false + } + } + return line, true + }, + requiredLabels: requiredLabelNames, + } +} + +func unsafeGetBytes(s string) []byte { + var buf []byte + p := unsafe.Pointer(&buf) + *(*string)(p) = s + (*reflect.SliceHeader)(p).Cap = len(s) + return buf +} + +func unsafeGetString(buf []byte) string { + return *((*string)(unsafe.Pointer(&buf))) +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/log/util.go b/vendor/github.com/grafana/loki/pkg/logql/log/util.go new file mode 100644 index 00000000..c4115eb0 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/log/util.go @@ -0,0 +1,62 @@ +package log + +import ( + "bytes" + "strings" +) + +func uniqueString(s []string) []string { + unique := make(map[string]bool, len(s)) + us := make([]string, 0, len(s)) + for _, elem := range s { + if len(elem) != 0 { + if !unique[elem] { + us = append(us, elem) + unique[elem] = true + } + } + } + return us +} + +func sanitizeLabelKey(key string, isPrefix bool) string { + if len(key) == 0 { + return key + } + key = strings.TrimSpace(key) + if len(key) == 0 { + return key + } + if isPrefix && key[0] >= '0' && key[0] <= '9' { + key = "_" + key + } + return strings.Map(func(r rune) rune { + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || r == '_' || (r >= '0' && r <= '9') { + return r + } + return '_' + }, key) +} + +// appendSanitize appends the sanitized key to the slice. +func appendSanitized(to, key []byte) []byte { + if len(key) == 0 { + return to + } + key = bytes.TrimSpace(key) + + if len(to) == 0 && key[0] >= '0' && key[0] <= '9' { + to = append(to, '_') + } + // range over rune + + for _, r := range string(key) { + if !((r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || r == '_' || (r >= '0' && r <= '9')) { + to = append(to, '_') + continue + } + to = append(to, byte(r)) + + } + return to +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/syntax/ast.go b/vendor/github.com/grafana/loki/pkg/logql/syntax/ast.go new file mode 100644 index 00000000..e110b372 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/syntax/ast.go @@ -0,0 +1,2271 @@ +package syntax + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" + "time" + + "github.com/grafana/loki/pkg/util" + + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" + + "github.com/grafana/regexp/syntax" + + "github.com/grafana/loki/pkg/logql/log" + "github.com/grafana/loki/pkg/logqlmodel" +) + +// Expr is the root expression which can be a SampleExpr or LogSelectorExpr +// +//sumtype:decl +type Expr interface { + logQLExpr() // ensure it's not implemented accidentally + Shardable() bool // A recursive check on the AST to see if it's shardable. + Walkable + fmt.Stringer + AcceptVisitor + + // Pretty prettyfies any LogQL expression at given `level` of the whole LogQL query. 
+ Pretty(level int) string +} + +func Clone[T Expr](e T) (T, error) { + var empty T + v := &cloneVisitor{} + e.Accept(v) + cast, ok := v.cloned.(T) + if !ok { + return empty, fmt.Errorf("unexpected type of cloned expression: want %T, got %T", empty, v.cloned) + } + return cast, nil +} + +func MustClone[T Expr](e T) T { + copied, err := Clone[T](e) + if err != nil { + panic(err) + } + return copied +} + +// implicit holds default implementations +type implicit struct{} + +func (implicit) logQLExpr() {} + +// LogSelectorExpr is a LogQL expression filtering and returning logs. +// +//sumtype:decl +type LogSelectorExpr interface { + Matchers() []*labels.Matcher + Pipeline() (Pipeline, error) + HasFilter() bool + Expr + + isLogSelectorExpr() +} + +// Type alias for backward compatibility +type ( + Pipeline = log.Pipeline + SampleExtractor = log.SampleExtractor +) + +// StageExpr is an expression defining a single step into a log pipeline +// +//sumtype:decl +type StageExpr interface { + Stage() (log.Stage, error) + Expr + + isStageExpr() +} + +// MultiStageExpr is multiple stages which implements a LogSelectorExpr. +type MultiStageExpr []StageExpr + +func (m MultiStageExpr) Pipeline() (log.Pipeline, error) { + stages, err := m.stages() + if err != nil { + return nil, err + } + return log.NewPipeline(stages), nil +} + +func (m MultiStageExpr) stages() ([]log.Stage, error) { + c := make([]log.Stage, 0, len(m)) + for _, e := range m.reorderStages() { + p, err := e.Stage() + if err != nil { + return nil, logqlmodel.NewStageError(e.String(), err) + } + if p == log.NoopStage { + continue + } + c = append(c, p) + } + return c, nil +} + +// reorderStages reorders m such that LineFilters +// are as close to the front of the filter as possible. +func (m MultiStageExpr) reorderStages() []StageExpr { + var ( + result = make([]StageExpr, 0, len(m)) + filters = make([]*LineFilterExpr, 0, len(m)) + rest = make([]StageExpr, 0, len(m)) + ) + + for _, s := range m { + switch f := s.(type) { + case *LineFilterExpr: + filters = append(filters, f) + case *LineFmtExpr: + // line_format modifies the contents of the line so any line filter + // originally after a line_format must still be after the same + // line_format. + + rest = append(rest, f) + + if len(filters) > 0 { + result = append(result, combineFilters(filters)) + } + result = append(result, rest...) + + filters = filters[:0] + rest = rest[:0] + case *LabelParserExpr: + rest = append(rest, f) + + // unpack modifies the contents of the line so any line filter + // originally after an unpack must still be after the same + // unpack. + if f.Op == OpParserTypeUnpack { + if len(filters) > 0 { + result = append(result, combineFilters(filters)) + } + result = append(result, rest...) + + filters = filters[:0] + rest = rest[:0] + } + default: + rest = append(rest, f) + } + } + + if len(filters) > 0 { + result = append(result, combineFilters(filters)) + } + return append(result, rest...) 
+} + +func combineFilters(in []*LineFilterExpr) StageExpr { + result := in[len(in)-1] + for i := len(in) - 2; i >= 0; i-- { + leafNode(result).Left = in[i] + } + + return result +} + +func leafNode(in *LineFilterExpr) *LineFilterExpr { + current := in + //nolint:revive + for ; current.Left != nil; current = current.Left { + } + return current +} + +func (m MultiStageExpr) String() string { + var sb strings.Builder + for i, e := range m { + sb.WriteString(e.String()) + if i+1 != len(m) { + sb.WriteString(" ") + } + } + return sb.String() +} + +func (MultiStageExpr) logQLExpr() {} // nolint:unused + +type MatchersExpr struct { + Mts []*labels.Matcher + implicit +} + +func newMatcherExpr(matchers []*labels.Matcher) *MatchersExpr { + return &MatchersExpr{Mts: matchers} +} + +func (e *MatchersExpr) isLogSelectorExpr() {} + +func (e *MatchersExpr) Matchers() []*labels.Matcher { + return e.Mts +} + +func (e *MatchersExpr) AppendMatchers(m []*labels.Matcher) { + e.Mts = append(e.Mts, m...) +} + +func (e *MatchersExpr) Shardable() bool { return true } + +func (e *MatchersExpr) Walk(f WalkFn) { f(e) } + +func (e *MatchersExpr) Accept(v RootVisitor) { v.VisitMatchers(e) } + +func (e *MatchersExpr) String() string { + var sb strings.Builder + sb.WriteString("{") + for i, m := range e.Mts { + sb.WriteString(m.String()) + if i+1 != len(e.Mts) { + sb.WriteString(", ") + } + } + sb.WriteString("}") + return sb.String() +} + +func (e *MatchersExpr) Pipeline() (log.Pipeline, error) { + return log.NewNoopPipeline(), nil +} + +func (e *MatchersExpr) HasFilter() bool { + return false +} + +type PipelineExpr struct { + MultiStages MultiStageExpr + Left *MatchersExpr + implicit +} + +func newPipelineExpr(left *MatchersExpr, pipeline MultiStageExpr) LogSelectorExpr { + return &PipelineExpr{ + Left: left, + MultiStages: pipeline, + } +} + +func (e *PipelineExpr) isLogSelectorExpr() {} + +func (e *PipelineExpr) Shardable() bool { + for _, p := range e.MultiStages { + if !p.Shardable() { + return false + } + } + return true +} + +func (e *PipelineExpr) Walk(f WalkFn) { + f(e) + + if e.Left == nil { + return + } + + xs := make([]Walkable, 0, len(e.MultiStages)+1) + xs = append(xs, e.Left) + for _, p := range e.MultiStages { + xs = append(xs, p) + } + walkAll(f, xs...) +} + +func (e *PipelineExpr) Accept(v RootVisitor) { v.VisitPipeline(e) } + +func (e *PipelineExpr) Matchers() []*labels.Matcher { + return e.Left.Matchers() +} + +func (e *PipelineExpr) String() string { + var sb strings.Builder + sb.WriteString(e.Left.String()) + sb.WriteString(" ") + sb.WriteString(e.MultiStages.String()) + return sb.String() +} + +func (e *PipelineExpr) Pipeline() (log.Pipeline, error) { + return e.MultiStages.Pipeline() +}
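End to end, a parsed selector turns its stages into such a pipeline. The snippet below is a hypothetical sketch: it assumes syntax.ParseLogSelector from Loki's parser package (not shown in this diff) and prometheus' labels package, and elides error handling beyond panics.

```go
// Illustrative sketch: from LogQL text to a per-stream pipeline.
// syntax.ParseLogSelector is assumed from Loki's parser (not in this diff).
expr, err := syntax.ParseLogSelector(`{app="foo"} |= "err" | logfmt | level="error"`, true)
if err != nil {
	panic(err)
}
p, err := expr.Pipeline()
if err != nil {
	panic(err)
}
sp := p.ForStream(labels.FromStrings("app", "foo"))
_, res, matched := sp.Process(0, []byte(`level=error msg="some err"`))
fmt.Println(matched, res.String()) // true, with level="error" extracted by logfmt
```

+// HasFilter returns true if the pipeline contains a stage that can filter out lines.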
+func (e *PipelineExpr) HasFilter() bool { + for _, p := range e.MultiStages { + switch p.(type) { + case *LineFilterExpr, *LabelFilterExpr: + return true + default: + continue + } + } + return false +} + +type LineFilterExpr struct { + Left *LineFilterExpr + Or *LineFilterExpr + IsOrChild bool + Ty labels.MatchType + Match string + Op string + implicit +} + +func newLineFilterExpr(ty labels.MatchType, op, match string) *LineFilterExpr { + return &LineFilterExpr{ + Ty: ty, + Match: match, + Op: op, + } +} + +func newOrLineFilter(left, right *LineFilterExpr) *LineFilterExpr { + right.Ty = left.Ty + + if left.Ty == labels.MatchEqual || left.Ty == labels.MatchRegexp { + left.Or = right + right.IsOrChild = true + return left + } + + // !(left or right) == (!left and !right). + return newNestedLineFilterExpr(left, right) +} + +func newNestedLineFilterExpr(left *LineFilterExpr, right *LineFilterExpr) *LineFilterExpr { + return &LineFilterExpr{ + Left: left, + Ty: right.Ty, + Match: right.Match, + Op: right.Op, + } +} + +func (*LineFilterExpr) isStageExpr() {} + +func (e *LineFilterExpr) Walk(f WalkFn) { + f(e) + if e.Left == nil { + return + } + e.Left.Walk(f) +} + +func (e *LineFilterExpr) Accept(v RootVisitor) { + v.VisitLineFilter(e) +} + +// AddFilterExpr adds a filter expression to a logselector expression. +func AddFilterExpr(expr LogSelectorExpr, ty labels.MatchType, op, match string) (LogSelectorExpr, error) { + filter := newLineFilterExpr(ty, op, match) + switch e := expr.(type) { + case *MatchersExpr: + return newPipelineExpr(e, MultiStageExpr{filter}), nil + case *PipelineExpr: + e.MultiStages = append(e.MultiStages, filter) + return e, nil + default: + return nil, fmt.Errorf("unknown LogSelector: %v+", expr) + } +} + +func (e *LineFilterExpr) Shardable() bool { return true } + +func (e *LineFilterExpr) String() string { + var sb strings.Builder + if e.Left != nil { + sb.WriteString(e.Left.String()) + sb.WriteString(" ") + } + + if !e.IsOrChild { // Only write the type when we're not chaining "or" filters + switch e.Ty { + case labels.MatchRegexp: + sb.WriteString("|~") + case labels.MatchNotRegexp: + sb.WriteString("!~") + case labels.MatchEqual: + sb.WriteString("|=") + case labels.MatchNotEqual: + sb.WriteString("!=") + } + sb.WriteString(" ") + } + + if e.Op == "" { + sb.WriteString(strconv.Quote(e.Match)) + } else { + sb.WriteString(e.Op) + sb.WriteString("(") + sb.WriteString(strconv.Quote(e.Match)) + sb.WriteString(")") + } + + if e.Or != nil { + sb.WriteString(" or ") + // This is dirty but removes the leading MatchType from the or expression. + sb.WriteString(e.Or.String()) + } + + return sb.String() +} + +func (e *LineFilterExpr) Filter() (log.Filterer, error) { + acc := make([]log.Filterer, 0) + for curr := e; curr != nil; curr = curr.Left { + var next log.Filterer + var err error + if curr.Or != nil { + next, err = newOrFilter(curr) + if err != nil { + return nil, err + } + acc = append(acc, next) + } else { + switch curr.Op { + case OpFilterIP: + next, err := log.NewIPLineFilter(curr.Match, curr.Ty) + if err != nil { + return nil, err + } + acc = append(acc, next) + default: + next, err = log.NewFilter(curr.Match, curr.Ty) + if err != nil { + return nil, err + } + + acc = append(acc, next) + } + } + } + + if len(acc) == 1 { + return acc[0], nil + } + + // The accumulation is right to left so it needs to be reversed. 
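+ // E.g. for `|= "a" |= "b" |= "c"`, acc holds [c, b, a] after the walk above; + // reversing restores query order before the filters are AND-ed together.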
+ for i := len(acc)/2 - 1; i >= 0; i-- { + opp := len(acc) - 1 - i + acc[i], acc[opp] = acc[opp], acc[i] + } + + return log.NewAndFilters(acc), nil +} + +func newOrFilter(f *LineFilterExpr) (log.Filterer, error) { + orFilter, err := log.NewFilter(f.Match, f.Ty) + if err != nil { + return nil, err + } + + for or := f.Or; or != nil; or = or.Or { + filter, err := log.NewFilter(or.Match, or.Ty) + if err != nil { + return nil, err + } + orFilter = log.ChainOrFilter(orFilter, filter) + } + + return orFilter, nil +} + +func (e *LineFilterExpr) Stage() (log.Stage, error) { + f, err := e.Filter() + if err != nil { + return nil, err + } + return f.ToStage(), nil +} + +type LogfmtParserExpr struct { + Strict bool + KeepEmpty bool + + implicit +} + +func newLogfmtParserExpr(flags []string) *LogfmtParserExpr { + e := LogfmtParserExpr{} + for _, f := range flags { + switch f { + case OpStrict: + e.Strict = true + case OpKeepEmpty: + e.KeepEmpty = true + } + } + + return &e +} + +func (*LogfmtParserExpr) isStageExpr() {} + +func (e *LogfmtParserExpr) Shardable() bool { return true } + +func (e *LogfmtParserExpr) Walk(f WalkFn) { f(e) } + +func (e *LogfmtParserExpr) Accept(v RootVisitor) { v.VisitLogfmtParser(e) } + +func (e *LogfmtParserExpr) Stage() (log.Stage, error) { + return log.NewLogfmtParser(e.Strict, e.KeepEmpty), nil +} + +func (e *LogfmtParserExpr) String() string { + var sb strings.Builder + sb.WriteString(OpPipe) + sb.WriteString(" ") + sb.WriteString(OpParserTypeLogfmt) + + if e.Strict { + sb.WriteString(" ") + sb.WriteString(OpStrict) + } + + if e.KeepEmpty { + sb.WriteString(" ") + sb.WriteString(OpKeepEmpty) + } + + return sb.String() +} + +type LabelParserExpr struct { + Op string + Param string + implicit +} + +func newLabelParserExpr(op, param string) *LabelParserExpr { + if op == OpParserTypeRegexp { + _, err := log.NewRegexpParser(param) + if err != nil { + panic(logqlmodel.NewParseError(fmt.Sprintf("invalid regexp parser: %s", err.Error()), 0, 0)) + } + } + if op == OpParserTypePattern { + _, err := log.NewPatternParser(param) + if err != nil { + panic(logqlmodel.NewParseError(fmt.Sprintf("invalid pattern parser: %s", err.Error()), 0, 0)) + } + } + + return &LabelParserExpr{ + Op: op, + Param: param, + } +} + +func (*LabelParserExpr) isStageExpr() {} + +func (e *LabelParserExpr) Shardable() bool { return true } + +func (e *LabelParserExpr) Walk(f WalkFn) { f(e) } + +func (e *LabelParserExpr) Accept(v RootVisitor) { v.VisitLabelParser(e) } + +func (e *LabelParserExpr) Stage() (log.Stage, error) { + switch e.Op { + case OpParserTypeJSON: + return log.NewJSONParser(), nil + case OpParserTypeRegexp: + return log.NewRegexpParser(e.Param) + case OpParserTypeUnpack: + return log.NewUnpackParser(), nil + case OpParserTypePattern: + return log.NewPatternParser(e.Param) + default: + return nil, fmt.Errorf("unknown parser operator: %s", e.Op) + } +} + +func (e *LabelParserExpr) String() string { + var sb strings.Builder + sb.WriteString(OpPipe) + sb.WriteString(" ") + sb.WriteString(e.Op) + if e.Param != "" { + sb.WriteString(" ") + sb.WriteString(strconv.Quote(e.Param)) + } + if (e.Op == OpParserTypeRegexp || e.Op == OpParserTypePattern) && e.Param == "" { + sb.WriteString(" \"\"") + } + return sb.String() +} + +type LabelFilterExpr struct { + log.LabelFilterer + implicit +} + +func newLabelFilterExpr(filterer log.LabelFilterer) *LabelFilterExpr { + return &LabelFilterExpr{ + LabelFilterer: filterer, + } +} + +func (*LabelFilterExpr) isStageExpr() {} + +func (e *LabelFilterExpr) Shardable() 
bool { return true } + +func (e *LabelFilterExpr) Walk(f WalkFn) { f(e) } + +func (e *LabelFilterExpr) Accept(v RootVisitor) { v.VisitLabelFilter(e) } + +func (e *LabelFilterExpr) Stage() (log.Stage, error) { + switch ip := e.LabelFilterer.(type) { + case *log.IPLabelFilter: + return ip, ip.PatternError() + case *log.NoopLabelFilter: + return log.NoopStage, nil + } + return e.LabelFilterer, nil +} + +func (e *LabelFilterExpr) String() string { + return fmt.Sprintf("%s %s", OpPipe, e.LabelFilterer.String()) +} + +type LineFmtExpr struct { + Value string + implicit +} + +func newLineFmtExpr(value string) *LineFmtExpr { + return &LineFmtExpr{ + Value: value, + } +} + +type DecolorizeExpr struct { + implicit +} + +func newDecolorizeExpr() *DecolorizeExpr { + return &DecolorizeExpr{} +} + +func (*DecolorizeExpr) isStageExpr() {} + +func (e *DecolorizeExpr) Shardable() bool { return true } + +func (e *DecolorizeExpr) Stage() (log.Stage, error) { + return log.NewDecolorizer() +} +func (e *DecolorizeExpr) String() string { + return fmt.Sprintf("%s %s", OpPipe, OpDecolorize) +} +func (e *DecolorizeExpr) Walk(f WalkFn) { f(e) } + +func (e *DecolorizeExpr) Accept(v RootVisitor) { v.VisitDecolorize(e) } + +type DropLabelsExpr struct { + dropLabels []log.DropLabel + implicit +} + +func newDropLabelsExpr(dropLabels []log.DropLabel) *DropLabelsExpr { + return &DropLabelsExpr{dropLabels: dropLabels} +} + +func (*DropLabelsExpr) isStageExpr() {} + +func (e *DropLabelsExpr) Shardable() bool { return true } + +func (e *DropLabelsExpr) Stage() (log.Stage, error) { + return log.NewDropLabels(e.dropLabels), nil +} +func (e *DropLabelsExpr) String() string { + var sb strings.Builder + + sb.WriteString(fmt.Sprintf("%s %s ", OpPipe, OpDrop)) + + for i, dropLabel := range e.dropLabels { + if dropLabel.Matcher != nil { + sb.WriteString(dropLabel.Matcher.String()) + if i+1 != len(e.dropLabels) { + sb.WriteString(",") + } + } + if dropLabel.Name != "" { + sb.WriteString(dropLabel.Name) + if i+1 != len(e.dropLabels) { + sb.WriteString(",") + } + } + } + str := sb.String() + return str +} +func (e *DropLabelsExpr) Walk(f WalkFn) { f(e) } + +func (e *DropLabelsExpr) Accept(v RootVisitor) { v.VisitDropLabels(e) } + +type KeepLabelsExpr struct { + keepLabels []log.KeepLabel + implicit +} + +func newKeepLabelsExpr(keepLabels []log.KeepLabel) *KeepLabelsExpr { + return &KeepLabelsExpr{keepLabels: keepLabels} +} + +func (*KeepLabelsExpr) isStageExpr() {} + +func (e *KeepLabelsExpr) Shardable() bool { return true } + +func (e *KeepLabelsExpr) Stage() (log.Stage, error) { + return log.NewKeepLabels(e.keepLabels), nil +} + +func (e *KeepLabelsExpr) String() string { + var sb strings.Builder + + sb.WriteString(fmt.Sprintf("%s %s ", OpPipe, OpKeep)) + + for i, keepLabel := range e.keepLabels { + if keepLabel.Matcher != nil { + sb.WriteString(keepLabel.Matcher.String()) + if i+1 != len(e.keepLabels) { + sb.WriteString(",") + } + } + if keepLabel.Name != "" { + sb.WriteString(keepLabel.Name) + if i+1 != len(e.keepLabels) { + sb.WriteString(",") + } + } + } + str := sb.String() + return str +} + +func (e *KeepLabelsExpr) Walk(f WalkFn) { f(e) } + +func (e *KeepLabelsExpr) Accept(v RootVisitor) { v.VisitKeepLabel(e) } + +func (*LineFmtExpr) isStageExpr() {} + +func (e *LineFmtExpr) Shardable() bool { return true } + +func (e *LineFmtExpr) Walk(f WalkFn) { f(e) } + +func (e *LineFmtExpr) Accept(v RootVisitor) { v.VisitLineFmt(e) } + +func (e *LineFmtExpr) Stage() (log.Stage, error) { + return log.NewFormatter(e.Value) +} + +func (e 
*LineFmtExpr) String() string { + return fmt.Sprintf("%s %s %s", OpPipe, OpFmtLine, strconv.Quote(e.Value)) +} + +type LabelFmtExpr struct { + Formats []log.LabelFmt + implicit +} + +func newLabelFmtExpr(fmts []log.LabelFmt) *LabelFmtExpr { + return &LabelFmtExpr{ + Formats: fmts, + } +} + +func (*LabelFmtExpr) isStageExpr() {} + +func (e *LabelFmtExpr) Shardable() bool { + // While LabelFmt is shardable in certain cases, it is not always, + // but this is left to the shardmapper to determine + return true +} + +func (e *LabelFmtExpr) Walk(f WalkFn) { f(e) } + +func (e *LabelFmtExpr) Accept(v RootVisitor) { v.VisitLabelFmt(e) } + +func (e *LabelFmtExpr) Stage() (log.Stage, error) { + return log.NewLabelsFormatter(e.Formats) +} + +func (e *LabelFmtExpr) String() string { + var sb strings.Builder + + sb.WriteString(fmt.Sprintf("%s %s ", OpPipe, OpFmtLabel)) + + for i, f := range e.Formats { + sb.WriteString(f.Name) + sb.WriteString("=") + if f.Rename { + sb.WriteString(f.Value) + } else { + sb.WriteString(strconv.Quote(f.Value)) + } + if i+1 != len(e.Formats) { + sb.WriteString(",") + } + } + return sb.String() +} + +type JSONExpressionParser struct { + Expressions []log.LabelExtractionExpr + + implicit +} + +func newJSONExpressionParser(expressions []log.LabelExtractionExpr) *JSONExpressionParser { + return &JSONExpressionParser{ + Expressions: expressions, + } +} + +func (*JSONExpressionParser) isStageExpr() {} + +func (j *JSONExpressionParser) Shardable() bool { return true } + +func (j *JSONExpressionParser) Walk(f WalkFn) { f(j) } + +func (j *JSONExpressionParser) Accept(v RootVisitor) { v.VisitJSONExpressionParser(j) } + +func (j *JSONExpressionParser) Stage() (log.Stage, error) { + return log.NewJSONExpressionParser(j.Expressions) +} + +func (j *JSONExpressionParser) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("%s %s ", OpPipe, OpParserTypeJSON)) + for i, exp := range j.Expressions { + sb.WriteString(exp.Identifier) + sb.WriteString("=") + sb.WriteString(strconv.Quote(exp.Expression)) + + if i+1 != len(j.Expressions) { + sb.WriteString(",") + } + } + return sb.String() +} + +type internedStringSet map[string]struct { + s string + ok bool +} + +type LogfmtExpressionParser struct { + Expressions []log.LabelExtractionExpr + Strict, KeepEmpty bool + + implicit +} + +func newLogfmtExpressionParser(expressions []log.LabelExtractionExpr, flags []string) *LogfmtExpressionParser { + e := LogfmtExpressionParser{ + Expressions: expressions, + } + + for _, flag := range flags { + switch flag { + case OpStrict: + e.Strict = true + case OpKeepEmpty: + e.KeepEmpty = true + } + } + + return &e +} + +func (*LogfmtExpressionParser) isStageExpr() {} + +func (l *LogfmtExpressionParser) Shardable() bool { return true } + +func (l *LogfmtExpressionParser) Walk(f WalkFn) { f(l) } + +func (l *LogfmtExpressionParser) Accept(v RootVisitor) { v.VisitLogfmtExpressionParser(l) } + +func (l *LogfmtExpressionParser) Stage() (log.Stage, error) { + return log.NewLogfmtExpressionParser(l.Expressions, l.Strict) +} + +func (l *LogfmtExpressionParser) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("%s %s ", OpPipe, OpParserTypeLogfmt)) + if l.Strict { + sb.WriteString(OpStrict) + sb.WriteString(" ") + } + + if l.KeepEmpty { + sb.WriteString(OpKeepEmpty) + sb.WriteString(" ") + } + + for i, exp := range l.Expressions { + sb.WriteString(exp.Identifier) + sb.WriteString("=") + sb.WriteString(strconv.Quote(exp.Expression)) + + if i+1 != len(l.Expressions) { + 
sb.WriteString(",") + } + } + return sb.String() +} + +func mustNewMatcher(t labels.MatchType, n, v string) *labels.Matcher { + m, err := labels.NewMatcher(t, n, v) + if err != nil { + panic(logqlmodel.NewParseError(err.Error(), 0, 0)) + } + return m +} + +// simplify will return an equals matcher if there is a regex matching a literal +func simplify(typ labels.MatchType, name string, reg *syntax.Regexp) (*labels.Matcher, bool) { + switch reg.Op { + case syntax.OpLiteral: + if !util.IsCaseInsensitive(reg) { + t := labels.MatchEqual + if typ == labels.MatchNotRegexp { + t = labels.MatchNotEqual + } + return labels.MustNewMatcher(t, name, string(reg.Rune)), true + } + return nil, false + } + return nil, false +} + +func mustNewFloat(s string) float64 { + n, err := strconv.ParseFloat(s, 64) + if err != nil { + panic(logqlmodel.NewParseError(fmt.Sprintf("unable to parse float: %s", err.Error()), 0, 0)) + } + return n +} + +type UnwrapExpr struct { + Identifier string + Operation string + + PostFilters []log.LabelFilterer +} + +func (u UnwrapExpr) String() string { + var sb strings.Builder + if u.Operation != "" { + sb.WriteString(fmt.Sprintf(" %s %s %s(%s)", OpPipe, OpUnwrap, u.Operation, u.Identifier)) + } else { + sb.WriteString(fmt.Sprintf(" %s %s %s", OpPipe, OpUnwrap, u.Identifier)) + } + for _, f := range u.PostFilters { + sb.WriteString(fmt.Sprintf(" %s %s", OpPipe, f)) + } + return sb.String() +} + +func (u *UnwrapExpr) addPostFilter(f log.LabelFilterer) *UnwrapExpr { + u.PostFilters = append(u.PostFilters, f) + return u +} + +func newUnwrapExpr(id string, operation string) *UnwrapExpr { + return &UnwrapExpr{Identifier: id, Operation: operation} +} + +type LogRange struct { + Left LogSelectorExpr + Interval time.Duration + Offset time.Duration + + Unwrap *UnwrapExpr + + implicit +} + +// impls Stringer +func (r LogRange) String() string { + var sb strings.Builder + sb.WriteString(r.Left.String()) + if r.Unwrap != nil { + sb.WriteString(r.Unwrap.String()) + } + sb.WriteString(fmt.Sprintf("[%v]", model.Duration(r.Interval))) + if r.Offset != 0 { + offsetExpr := OffsetExpr{Offset: r.Offset} + sb.WriteString(offsetExpr.String()) + } + return sb.String() +} + +func (r *LogRange) Shardable() bool { return r.Left.Shardable() } + +func (r *LogRange) Walk(f WalkFn) { + f(r) + if r.Left == nil { + return + } + r.Left.Walk(f) +} + +func (r *LogRange) Accept(v RootVisitor) { + v.VisitLogRange(r) +} + +// WithoutUnwrap returns a copy of the log range without the unwrap statement. 
+func (r *LogRange) WithoutUnwrap() (*LogRange, error) { + left, err := Clone(r.Left) + if err != nil { + return nil, err + } + return &LogRange{ + Left: left, + Interval: r.Interval, + Offset: r.Offset, + }, nil +} + +func newLogRange(left LogSelectorExpr, interval time.Duration, u *UnwrapExpr, o *OffsetExpr) *LogRange { + var offset time.Duration + if o != nil { + offset = o.Offset + } + return &LogRange{ + Left: left, + Interval: interval, + Unwrap: u, + Offset: offset, + } +} + +type OffsetExpr struct { + Offset time.Duration +} + +func (o *OffsetExpr) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf(" %s %s", OpOffset, o.Offset.String())) + return sb.String() +} + +func newOffsetExpr(offset time.Duration) *OffsetExpr { + return &OffsetExpr{ + Offset: offset, + } +} + +const ( + // vector ops + OpTypeSum = "sum" + OpTypeAvg = "avg" + OpTypeMax = "max" + OpTypeMin = "min" + OpTypeCount = "count" + OpTypeStddev = "stddev" + OpTypeStdvar = "stdvar" + OpTypeBottomK = "bottomk" + OpTypeTopK = "topk" + OpTypeSort = "sort" + OpTypeSortDesc = "sort_desc" + + // range vector ops + OpRangeTypeCount = "count_over_time" + OpRangeTypeRate = "rate" + OpRangeTypeRateCounter = "rate_counter" + OpRangeTypeBytes = "bytes_over_time" + OpRangeTypeBytesRate = "bytes_rate" + OpRangeTypeAvg = "avg_over_time" + OpRangeTypeSum = "sum_over_time" + OpRangeTypeMin = "min_over_time" + OpRangeTypeMax = "max_over_time" + OpRangeTypeStdvar = "stdvar_over_time" + OpRangeTypeStddev = "stddev_over_time" + OpRangeTypeQuantile = "quantile_over_time" + OpRangeTypeFirst = "first_over_time" + OpRangeTypeLast = "last_over_time" + OpRangeTypeAbsent = "absent_over_time" + + //vector + OpTypeVector = "vector" + + // binops - logical/set + OpTypeOr = "or" + OpTypeAnd = "and" + OpTypeUnless = "unless" + + // binops - arithmetic + OpTypeAdd = "+" + OpTypeSub = "-" + OpTypeMul = "*" + OpTypeDiv = "/" + OpTypeMod = "%" + OpTypePow = "^" + + // binops - comparison + OpTypeCmpEQ = "==" + OpTypeNEQ = "!=" + OpTypeGT = ">" + OpTypeGTE = ">=" + OpTypeLT = "<" + OpTypeLTE = "<=" + + // parsers + OpParserTypeJSON = "json" + OpParserTypeLogfmt = "logfmt" + OpParserTypeRegexp = "regexp" + OpParserTypeUnpack = "unpack" + OpParserTypePattern = "pattern" + + OpFmtLine = "line_format" + OpFmtLabel = "label_format" + OpDecolorize = "decolorize" + + OpPipe = "|" + OpUnwrap = "unwrap" + OpOffset = "offset" + + OpOn = "on" + OpIgnoring = "ignoring" + + OpGroupLeft = "group_left" + OpGroupRight = "group_right" + + // conversion Op + OpConvBytes = "bytes" + OpConvDuration = "duration" + OpConvDurationSeconds = "duration_seconds" + + OpLabelReplace = "label_replace" + + // function filters + OpFilterIP = "ip" + + // drop labels + OpDrop = "drop" + + // keep labels + OpKeep = "keep" + + // parser flags + OpStrict = "--strict" + OpKeepEmpty = "--keep-empty" + + // internal expressions not represented in LogQL. These are used to + // evaluate expressions differently resulting in intermediate formats + // that are not consumable by LogQL clients but are used for sharding. 
+ OpRangeTypeQuantileSketch = "__quantile_sketch_over_time__" +) + +func IsComparisonOperator(op string) bool { + switch op { + case OpTypeCmpEQ, OpTypeNEQ, OpTypeGT, OpTypeGTE, OpTypeLT, OpTypeLTE: + return true + default: + return false + } +} + +// IsLogicalBinOp tests whether an operation is a logical/set binary operation +func IsLogicalBinOp(op string) bool { + switch op { + case OpTypeOr, OpTypeAnd, OpTypeUnless: + return true + default: + return false + } +} + +// SampleExpr is a LogQL expression filtering logs and returning metric samples. +// +//sumtype:decl +type SampleExpr interface { + // Selector is the LogQL selector to apply when retrieving logs. + Selector() (LogSelectorExpr, error) + Extractor() (SampleExtractor, error) + MatcherGroups() ([]MatcherRange, error) + Expr + isSampleExpr() +} + +// RangeAggregationExpr not all range vector aggregation expressions support grouping by/without label(s), +// therefore the Grouping struct can be nil. +type RangeAggregationExpr struct { + Left *LogRange + Operation string + + Params *float64 + Grouping *Grouping + err error + implicit +} + +func newRangeAggregationExpr(left *LogRange, operation string, gr *Grouping, stringParams *string) SampleExpr { + var params *float64 + if stringParams != nil { + if operation != OpRangeTypeQuantile && operation != OpRangeTypeQuantileSketch { + return &RangeAggregationExpr{err: logqlmodel.NewParseError(fmt.Sprintf("parameter %s not supported for operation %s", *stringParams, operation), 0, 0)} + } + var err error + params = new(float64) + *params, err = strconv.ParseFloat(*stringParams, 64) + if err != nil { + return &RangeAggregationExpr{err: logqlmodel.NewParseError(fmt.Sprintf("invalid parameter for operation %s: %s", operation, err), 0, 0)} + } + + } else { + if operation == OpRangeTypeQuantile { + return &RangeAggregationExpr{err: logqlmodel.NewParseError(fmt.Sprintf("parameter required for operation %s", operation), 0, 0)} + } + } + e := &RangeAggregationExpr{ + Left: left, + Operation: operation, + Grouping: gr, + Params: params, + } + if err := e.validate(); err != nil { + return &RangeAggregationExpr{err: logqlmodel.NewParseError(err.Error(), 0, 0)} + } + return e +} +func (e *RangeAggregationExpr) isSampleExpr() {} + +func (e *RangeAggregationExpr) Selector() (LogSelectorExpr, error) { + if e.err != nil { + return nil, e.err + } + return e.Left.Left, nil +} + +func (e *RangeAggregationExpr) MatcherGroups() ([]MatcherRange, error) { + if e.err != nil { + return nil, e.err + } + xs := e.Left.Left.Matchers() + if len(xs) > 0 { + return []MatcherRange{ + { + Matchers: xs, + Interval: e.Left.Interval, + Offset: e.Left.Offset, + }, + }, nil + } + return nil, nil +} + +func (e RangeAggregationExpr) validate() error { + if e.Grouping != nil { + switch e.Operation { + case OpRangeTypeAvg, OpRangeTypeStddev, OpRangeTypeStdvar, OpRangeTypeQuantile, OpRangeTypeQuantileSketch, OpRangeTypeMax, OpRangeTypeMin, OpRangeTypeFirst, OpRangeTypeLast: + default: + return fmt.Errorf("grouping not allowed for %s aggregation", e.Operation) + } + } + if e.Left.Unwrap != nil { + switch e.Operation { + case OpRangeTypeAvg, OpRangeTypeSum, OpRangeTypeMax, OpRangeTypeMin, OpRangeTypeStddev, + OpRangeTypeStdvar, OpRangeTypeQuantile, OpRangeTypeRate, OpRangeTypeRateCounter, + OpRangeTypeAbsent, OpRangeTypeFirst, OpRangeTypeLast, OpRangeTypeQuantileSketch: + return nil + default: + return fmt.Errorf("invalid aggregation %s with unwrap", e.Operation) + } + } + switch e.Operation { + case OpRangeTypeBytes, 
OpRangeTypeBytesRate, OpRangeTypeCount, OpRangeTypeRate, OpRangeTypeAbsent: + return nil + default: + return fmt.Errorf("invalid aggregation %s without unwrap", e.Operation) + } +} + +func (e RangeAggregationExpr) Validate() error { + return e.validate() +} + +// impls Stringer +func (e *RangeAggregationExpr) String() string { + var sb strings.Builder + sb.WriteString(e.Operation) + sb.WriteString("(") + if e.Params != nil { + sb.WriteString(strconv.FormatFloat(*e.Params, 'f', -1, 64)) + sb.WriteString(",") + } + sb.WriteString(e.Left.String()) + sb.WriteString(")") + if e.Grouping != nil { + sb.WriteString(e.Grouping.String()) + } + return sb.String() +} + +// impl SampleExpr +func (e *RangeAggregationExpr) Shardable() bool { + return shardableOps[e.Operation] && e.Left.Shardable() +} + +func (e *RangeAggregationExpr) Walk(f WalkFn) { + f(e) + if e.Left == nil { + return + } + e.Left.Walk(f) +} + +func (e *RangeAggregationExpr) Accept(v RootVisitor) { v.VisitRangeAggregation(e) } + +// Grouping struct represents the grouping by/without label(s) for vector aggregators and range vector aggregators. +// The representation is as follows: +// - No Grouping (labels dismissed): <operation>() => Grouping{Without: false, Groups: nil} +// - Grouping by empty label set: by () <operation>() => Grouping{Without: false, Groups: []} +// - Grouping by label set: by (<labels...>) <operation>() => Grouping{Without: false, Groups: [<labels...>]} +// - Grouping without empty label set: without () <operation>() => Grouping{Without: true, Groups: []} +// - Grouping without label set: without (<labels...>) <operation>() => Grouping{Without: true, Groups: [<labels...>]} +type Grouping struct { + Groups []string + Without bool +} + +// impls Stringer +func (g Grouping) String() string { + var sb strings.Builder + + if g.Without { + sb.WriteString(" without ") + } else { + sb.WriteString(" by ") + } + + sb.WriteString("(") + sb.WriteString(strings.Join(g.Groups, ",")) + sb.WriteString(")") + + return sb.String() +} + +// Noop reports whether the grouping doesn't change the result. +func (g Grouping) Noop() bool { + return len(g.Groups) == 0 && g.Without +} + +// Singleton reports whether the grouping reduces the result to a single value +// with no labels. +func (g Grouping) Singleton() bool { + return len(g.Groups) == 0 && !g.Without +}
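Since Grouping and its helpers are exported, the semantics above can be exercised directly; a small illustrative sketch (vendored package path assumed):

```go
// Illustrative sketch of Grouping semantics.
g := syntax.Grouping{Groups: []string{"cluster", "namespace"}}
fmt.Println(g.String()) // " by (cluster,namespace)"

byEmpty := syntax.Grouping{}                   // by ():      reduces to a single, label-less value
withoutEmpty := syntax.Grouping{Without: true} // without (): keeps every label
fmt.Println(byEmpty.Singleton(), withoutEmpty.Noop()) // true true
```

+// VectorAggregationExpr all vector aggregation expressions support grouping by/without label(s), +// therefore the Grouping struct can never be nil.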
+type VectorAggregationExpr struct { + Left SampleExpr `json:"sample_expr"` + + Grouping *Grouping `json:"grouping,omitempty"` + Params int `json:"params"` + Operation string `json:"operation"` + err error + implicit +} + +func mustNewVectorAggregationExpr(left SampleExpr, operation string, gr *Grouping, params *string) SampleExpr { + var p int + var err error + switch operation { + case OpTypeBottomK, OpTypeTopK: + if params == nil { + return &VectorAggregationExpr{err: logqlmodel.NewParseError(fmt.Sprintf("parameter required for operation %s", operation), 0, 0)} + } + p, err = strconv.Atoi(*params) + if err != nil { + return &VectorAggregationExpr{err: logqlmodel.NewParseError(fmt.Sprintf("invalid parameter %s(%s,", operation, *params), 0, 0)} + } + if p <= 0 { + return &VectorAggregationExpr{err: logqlmodel.NewParseError(fmt.Sprintf("invalid parameter (must be greater than 0) %s(%s", operation, *params), 0, 0)} + } + + default: + if params != nil { + return &VectorAggregationExpr{err: logqlmodel.NewParseError(fmt.Sprintf("unsupported parameter for operation %s(%s,", operation, *params), 0, 0)} + } + } + if gr == nil { + gr = &Grouping{} + } + return &VectorAggregationExpr{ + Left: left, + Operation: operation, + Grouping: gr, + Params: p, + } +} + +func (e *VectorAggregationExpr) isSampleExpr() {} + +func (e *VectorAggregationExpr) MatcherGroups() ([]MatcherRange, error) { + if e.err != nil { + return nil, e.err + } + return e.Left.MatcherGroups() +} + +func (e *VectorAggregationExpr) Selector() (LogSelectorExpr, error) { + if e.err != nil { + return nil, e.err + } + return e.Left.Selector() +} + +func (e *VectorAggregationExpr) Extractor() (log.SampleExtractor, error) { + if e.err != nil { + return nil, e.err + } + // inject in the range vector extractor the outer groups to improve performance. + // This is only possible if the operation is a sum. Anything else needs all labels. + if r, ok := e.Left.(*RangeAggregationExpr); ok && canInjectVectorGrouping(e.Operation, r.Operation) { + // if the range vec operation has no grouping we can push down the vec one. + if r.Grouping == nil { + return r.extractor(e.Grouping) + } + } + return e.Left.Extractor() +} + +// canInjectVectorGrouping tells if a vector operation can inject grouping into the nested range vector. +func canInjectVectorGrouping(vecOp, rangeOp string) bool { + if vecOp != OpTypeSum { + return false + } + switch rangeOp { + case OpRangeTypeBytes, OpRangeTypeBytesRate, OpRangeTypeSum, OpRangeTypeRate, OpRangeTypeCount: + return true + default: + return false + } +} + +func (e *VectorAggregationExpr) String() string { + var params []string + switch e.Operation { + // bottomK and topk can have first parameter as 0 + case OpTypeBottomK, OpTypeTopK: + params = []string{fmt.Sprintf("%d", e.Params), e.Left.String()} + default: + if e.Params != 0 { + params = []string{fmt.Sprintf("%d", e.Params), e.Left.String()} + } else { + params = []string{e.Left.String()} + } + } + return formatVectorOperation(e.Operation, e.Grouping, params...) +} + +// impl SampleExpr +func (e *VectorAggregationExpr) Shardable() bool { + if !shardableOps[e.Operation] || !e.Left.Shardable() { + return false + } + + switch e.Operation { + + case OpTypeCount, OpTypeAvg: + // count is shardable if labels are not mutated + // otherwise distinct values can be present in multiple shards and + // counted twice. + // avg is similar since it's remapped to sum/count. 
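+ // (An average of shard-local averages would be wrong whenever shards hold + // different numbers of samples, hence the sum/count remap.)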
+ // TODO(owen-d): this is hard to figure out; we should refactor to + // make these relationships clearer, safer, and more extensible. + shardable := !ReducesLabels(e.Left) + + return shardable + + case OpTypeMax, OpTypeMin: + // max(<range_aggr>) can be sharded by pushing down the max|min aggregation, + // but max(<vector_aggr>) cannot. It needs to perform the + // aggregation on the total result set, and then pick the max|min. + // For instance, `max(max_over_time)` or `max(rate)` can turn into + // `max( max(rate(shard1)) ++ max(rate(shard2)) ... etc)`, + // but you can’t do + // `max( max(sum(rate(shard1))) ++ max(sum(rate(shard2))) ... etc)` + // because it’s only taking the maximum from each shard, + // but we actually need to sum all the shards then put the max on top + if _, ok := e.Left.(*RangeAggregationExpr); ok { + return true + } + return false + + case OpTypeSum: + // sum can shard & merge vector & range aggregations, but only if + // the resulting computation is commutative and associative. + // This does not apply to min & max, because while `min(min(min))` + // satisfies the above, sum( sum(min(shard1)) ++ sum(min(shard2)) ) + // does not + if child, ok := e.Left.(*VectorAggregationExpr); ok { + switch child.Operation { + case OpTypeMin, OpTypeMax: + return false + } + } + return true + + } + + return true +} + +func (e *VectorAggregationExpr) Walk(f WalkFn) { + f(e) + if e.Left == nil { + return + } + e.Left.Walk(f) +} + +func (e *VectorAggregationExpr) Accept(v RootVisitor) { v.VisitVectorAggregation(e) }
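The max/min rule can be sanity-checked against the parser; a hypothetical sketch, assuming syntax.ParseSampleExpr from Loki's parser package (not part of this diff; errors elided for brevity):

```go
// max over a range aggregation: the max can be pushed down into each shard.
e1, _ := syntax.ParseSampleExpr(`max(max_over_time({app="foo"} | unwrap latency [5m]))`)
fmt.Println(e1.Shardable()) // true

// max over a vector aggregation: shards must be summed before taking the max.
e2, _ := syntax.ParseSampleExpr(`max(sum(rate({app="foo"}[5m])))`)
fmt.Println(e2.Shardable()) // false
```

+// VectorMatchCardinality describes the cardinality relationship +// of two Vectors in a binary operation. +type VectorMatchCardinality int + +const ( + CardOneToOne VectorMatchCardinality = iota + CardManyToOne + CardOneToMany +) + +func (vmc VectorMatchCardinality) String() string { + switch vmc { + case CardOneToOne: + return "one-to-one" + case CardManyToOne: + return "many-to-one" + case CardOneToMany: + return "one-to-many" + } + panic("promql.VectorMatchCardinality.String: unknown match cardinality") +} + +// VectorMatching describes how elements from two Vectors in a binary +// operation are supposed to be matched. +type VectorMatching struct { + // The cardinality of the two Vectors. + Card VectorMatchCardinality + // MatchingLabels contains the labels which define equality of a pair of + // elements from the Vectors. + MatchingLabels []string + // On includes the given label names from matching, + // rather than excluding them. + On bool + // Include contains additional labels that should be included in + // the result from the side with the lower cardinality.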
+ Include []string +} + +type BinOpOptions struct { + ReturnBool bool + VectorMatching *VectorMatching +} + +type BinOpExpr struct { + SampleExpr + RHS SampleExpr + Op string + Opts *BinOpOptions + err error +} + +func (e *BinOpExpr) MatcherGroups() ([]MatcherRange, error) { + if e.err != nil { + return nil, e.err + } + groups, err := e.SampleExpr.MatcherGroups() + if err != nil { + return nil, err + } + RHSGroups, err := e.RHS.MatcherGroups() + if err != nil { + return nil, err + } + return append(groups, RHSGroups...), nil +} + +func (e *BinOpExpr) String() string { + op := e.Op + if e.Opts != nil { + if e.Opts.ReturnBool { + op = fmt.Sprintf("%s bool", op) + } + if e.Opts.VectorMatching != nil { + group := "" + if e.Opts.VectorMatching.Card == CardManyToOne { + group = OpGroupLeft + } else if e.Opts.VectorMatching.Card == CardOneToMany { + group = OpGroupRight + } + if e.Opts.VectorMatching.Include != nil { + group = fmt.Sprintf("%s (%s)", group, strings.Join(e.Opts.VectorMatching.Include, ",")) + } + + if e.Opts.VectorMatching.On || e.Opts.VectorMatching.MatchingLabels != nil { + on := OpOn + if !e.Opts.VectorMatching.On { + on = OpIgnoring + } + op = fmt.Sprintf("%s %s (%s) %s", op, on, strings.Join(e.Opts.VectorMatching.MatchingLabels, ","), group) + } + } + } + return fmt.Sprintf("(%s %s %s)", e.SampleExpr.String(), op, e.RHS.String()) +} + +// impl SampleExpr +func (e *BinOpExpr) Shardable() bool { + if e.Opts != nil && e.Opts.VectorMatching != nil { + matching := e.Opts.VectorMatching + // prohibit sharding when we're changing the label groupings, + // such as when using `on` grouping or when using + // `ignoring` with a non-zero set of labels to ignore. + // `ignoring ()` is effectively the zero value + // that doesn't mutate labels and is shardable. 
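+ // E.g. `sum(rate({app="a"}[1m])) / on (cluster) sum(rate({app="b"}[1m]))` + // regroups series by `cluster` and therefore must not be sharded.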
+ if matching.On || len(matching.MatchingLabels) > 0 { + return false + } + } + return shardableOps[e.Op] && e.SampleExpr.Shardable() && e.RHS.Shardable() +} + +func (e *BinOpExpr) Walk(f WalkFn) { + walkAll(f, e.SampleExpr, e.RHS) +} + +func (e *BinOpExpr) Accept(v RootVisitor) { v.VisitBinOp(e) } + +func mustNewBinOpExpr(op string, opts *BinOpOptions, lhs, rhs Expr) SampleExpr { + left, ok := lhs.(SampleExpr) + if !ok { + return &BinOpExpr{err: logqlmodel.NewParseError(fmt.Sprintf( + "unexpected type for left leg of binary operation (%s): %T", + op, + lhs, + ), 0, 0)} + } + + right, ok := rhs.(SampleExpr) + if !ok { + return &BinOpExpr{err: logqlmodel.NewParseError(fmt.Sprintf( + "unexpected type for right leg of binary operation (%s): %T", + op, + rhs, + ), 0, 0)} + } + + leftLit, lOk := left.(*LiteralExpr) + rightLit, rOk := right.(*LiteralExpr) + var leftVal float64 + var rightVal float64 + if lOk { + leftV, err := leftLit.Value() + if err != nil { + return &BinOpExpr{err: err} + } + leftVal = leftV + } + if rOk { + rightV, err := rightLit.Value() + if err != nil { + return &BinOpExpr{err: err} + } + rightVal = rightV + } + if IsLogicalBinOp(op) { + + if lOk { + return &BinOpExpr{err: logqlmodel.NewParseError(fmt.Sprintf( + "unexpected literal for left leg of logical/set binary operation (%s): %f", + op, + leftVal, + ), 0, 0)} + } + + if rOk { + return &BinOpExpr{err: logqlmodel.NewParseError(fmt.Sprintf( + "unexpected literal for right leg of logical/set binary operation (%s): %f", + op, + rightVal, + ), 0, 0)} + } + } + + // map expr like (1+1) -> 2 + if lOk && rOk { + return reduceBinOp(op, leftVal, rightVal) + } + + return &BinOpExpr{ + SampleExpr: left, + RHS: right, + Op: op, + Opts: opts, + err: nil, + } +} + +// Reduces a binary operation expression. A binop is reducible if both of its legs are literal expressions. +// This is because literals need to match all labels, which is currently difficult to encode into StepEvaluators. +// Therefore, we ensure a binop can be reduced/simplified, maintaining the invariant that it does not have two literal legs. +func reduceBinOp(op string, left, right float64) *LiteralExpr { + merged, err := MergeBinOp( + op, + &promql.Sample{F: left}, + &promql.Sample{F: right}, + false, + false, + false, + ) + if err != nil { + return &LiteralExpr{err: err} + } + return &LiteralExpr{Val: merged.F} +}
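Since MergeBinOp is exported, the vector/scalar convention it documents is easy to exercise; a brief illustrative sketch (usage assumed, promql.Sample from Prometheus):

```go
left, right := &promql.Sample{F: 10}, &promql.Sample{F: 4}

sub, _ := syntax.MergeBinOp(syntax.OpTypeSub, left, right, false, false, false)
fmt.Println(sub.F) // 6

// A filtered vector comparison returns the vector side's original sample value,
// not the boolean 1 produced by the comparison itself.
gt, _ := syntax.MergeBinOp(syntax.OpTypeGT, left, right, false, true, true)
fmt.Println(gt.F) // 10
```

+// MergeBinOp performs `op` on the `left` and `right` arguments and returns the `promql.Sample` value. +// In case of vector and scalar arguments, MergeBinOp assumes `left` is always the vector; +// pass `swap=true` otherwise. +// This matters because, whether it's (vector op scalar) or (scalar op vector), the returned sample value should +// always be the sample value of the vector argument.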
+// https://github.com/grafana/loki/issues/10741 +func MergeBinOp(op string, left, right *promql.Sample, swap, filter, isVectorComparison bool) (*promql.Sample, error) { + var merger func(left, right *promql.Sample) *promql.Sample + + switch op { + case OpTypeAdd: + merger = func(left, right *promql.Sample) *promql.Sample { + if left == nil || right == nil { + return nil + } + res := *left + res.F += right.F + return &res + } + + case OpTypeSub: + merger = func(left, right *promql.Sample) *promql.Sample { + if left == nil || right == nil { + return nil + } + res := *left + res.F -= right.F + return &res + } + + case OpTypeMul: + merger = func(left, right *promql.Sample) *promql.Sample { + if left == nil || right == nil { + return nil + } + res := *left + res.F *= right.F + return &res + } + + case OpTypeDiv: + merger = func(left, right *promql.Sample) *promql.Sample { + if left == nil || right == nil { + return nil + } + res := *left + // guard against divide by zero + if right.F == 0 { + res.F = math.NaN() + } else { + res.F /= right.F + } + return &res + } + + case OpTypeMod: + merger = func(left, right *promql.Sample) *promql.Sample { + if left == nil || right == nil { + return nil + } + res := *left + // guard against divide by zero + if right.F == 0 { + res.F = math.NaN() + } else { + res.F = math.Mod(res.F, right.F) + } + return &res + } + + case OpTypePow: + merger = func(left, right *promql.Sample) *promql.Sample { + if left == nil || right == nil { + return nil + } + + res := *left + res.F = math.Pow(left.F, right.F) + return &res + } + + case OpTypeCmpEQ: + merger = func(left, right *promql.Sample) *promql.Sample { + if left == nil || right == nil { + return nil + } + + res := *left + + val := 0. + if left.F == right.F { + val = 1. + } else if filter { + return nil + } + res.F = val + return &res + } + + case OpTypeNEQ: + merger = func(left, right *promql.Sample) *promql.Sample { + if left == nil || right == nil { + return nil + } + + res := *left + val := 0. + if left.F != right.F { + val = 1. + } else if filter { + return nil + } + res.F = val + return &res + } + + case OpTypeGT: + merger = func(left, right *promql.Sample) *promql.Sample { + if left == nil || right == nil { + return nil + } + + res := *left + val := 0. + if left.F > right.F { + val = 1. + } else if filter { + return nil + } + res.F = val + return &res + } + + case OpTypeGTE: + merger = func(left, right *promql.Sample) *promql.Sample { + if left == nil || right == nil { + return nil + } + + res := *left + val := 0. + if left.F >= right.F { + val = 1. + } else if filter { + return nil + } + res.F = val + return &res + } + + case OpTypeLT: + merger = func(left, right *promql.Sample) *promql.Sample { + if left == nil || right == nil { + return nil + } + + res := *left + val := 0. + if left.F < right.F { + val = 1. + } else if filter { + return nil + } + res.F = val + return &res + } + + case OpTypeLTE: + merger = func(left, right *promql.Sample) *promql.Sample { + if left == nil || right == nil { + return nil + } + + res := *left + val := 0. + if left.F <= right.F { + val = 1. 
+ } else if filter { + return nil + } + res.F = val + return &res + } + + default: + return nil, errors.Errorf("should never happen: unexpected operation: (%s)", op) + } + + res := merger(left, right) + if !isVectorComparison { + return res, nil + } + + if filter { + // if a filter is enabled vector-wise comparison has returned non-nil, + // ensure we return the vector hand side's sample value, instead of the + // comparison operator's result (1: the truthy answer. a.k.a bool) + + retSample := left + if swap { + retSample = right + } + + if res != nil { + return retSample, nil + } + } + return res, nil +} + +type LiteralExpr struct { + Val float64 `json:"val"` + err error + implicit +} + +func mustNewLiteralExpr(s string, invert bool) *LiteralExpr { + n, err := strconv.ParseFloat(s, 64) + if err != nil { + err = logqlmodel.NewParseError(fmt.Sprintf("unable to parse literal as a float: %s", err.Error()), 0, 0) + } + + if invert { + n = -n + } + + return &LiteralExpr{ + Val: n, + err: err, + } +} + +func (e *LiteralExpr) String() string { + return fmt.Sprint(e.Val) +} + +// literlExpr impls SampleExpr & LogSelectorExpr mainly to reduce the need for more complicated typings +// to facilitate sum types. We'll be type switching when evaluating them anyways +// and they will only be present in binary operation legs. +func (e *LiteralExpr) isSampleExpr() {} +func (e *LiteralExpr) isLogSelectorExpr() {} +func (e *LiteralExpr) Selector() (LogSelectorExpr, error) { return e, e.err } +func (e *LiteralExpr) HasFilter() bool { return false } +func (e *LiteralExpr) Shardable() bool { return true } +func (e *LiteralExpr) Walk(f WalkFn) { f(e) } +func (e *LiteralExpr) Accept(v RootVisitor) { v.VisitLiteral(e) } +func (e *LiteralExpr) Pipeline() (log.Pipeline, error) { return log.NewNoopPipeline(), nil } +func (e *LiteralExpr) Matchers() []*labels.Matcher { return nil } +func (e *LiteralExpr) MatcherGroups() ([]MatcherRange, error) { return nil, e.err } +func (e *LiteralExpr) Extractor() (log.SampleExtractor, error) { return nil, e.err } +func (e *LiteralExpr) Value() (float64, error) { + if e.err != nil { + return 0, e.err + } + return e.Val, nil +} + +// helper used to impl Stringer for vector and range aggregations +// nolint:interfacer +func formatVectorOperation(op string, grouping *Grouping, params ...string) string { + nonEmptyParams := make([]string, 0, len(params)) + for _, p := range params { + if p != "" { + nonEmptyParams = append(nonEmptyParams, p) + } + } + + var sb strings.Builder + sb.WriteString(op) + if grouping != nil && !grouping.Singleton() { + sb.WriteString(grouping.String()) + } + sb.WriteString("(") + sb.WriteString(strings.Join(nonEmptyParams, ",")) + sb.WriteString(")") + return sb.String() +} + +type LabelReplaceExpr struct { + Left SampleExpr + Dst string + Replacement string + Src string + Regex string + Re *regexp.Regexp + err error + + implicit +} + +func mustNewLabelReplaceExpr(left SampleExpr, dst, replacement, src, regex string) *LabelReplaceExpr { + re, err := regexp.Compile("^(?:" + regex + ")$") + if err != nil { + return &LabelReplaceExpr{ + err: logqlmodel.NewParseError(fmt.Sprintf("invalid regex in label_replace: %s", err.Error()), 0, 0), + } + } + return &LabelReplaceExpr{ + Left: left, + Dst: dst, + Replacement: replacement, + Src: src, + Re: re, + Regex: regex, + } +} + +func (e *LabelReplaceExpr) isSampleExpr() {} + +func (e *LabelReplaceExpr) Selector() (LogSelectorExpr, error) { + if e.err != nil { + return nil, e.err + } + return e.Left.Selector() +} + +func 
(e *LabelReplaceExpr) MatcherGroups() ([]MatcherRange, error) { + if e.err != nil { + return nil, e.err + } + return e.Left.MatcherGroups() +} + +func (e *LabelReplaceExpr) Extractor() (SampleExtractor, error) { + if e.err != nil { + return nil, e.err + } + return e.Left.Extractor() +} + +func (e *LabelReplaceExpr) Shardable() bool { + return false +} + +func (e *LabelReplaceExpr) Walk(f WalkFn) { + f(e) + if e.Left == nil { + return + } + e.Left.Walk(f) +} + +func (e *LabelReplaceExpr) Accept(v RootVisitor) { v.VisitLabelReplace(e) } + +func (e *LabelReplaceExpr) String() string { + var sb strings.Builder + sb.WriteString(OpLabelReplace) + sb.WriteString("(") + sb.WriteString(e.Left.String()) + sb.WriteString(",") + sb.WriteString(strconv.Quote(e.Dst)) + sb.WriteString(",") + sb.WriteString(strconv.Quote(e.Replacement)) + sb.WriteString(",") + sb.WriteString(strconv.Quote(e.Src)) + sb.WriteString(",") + sb.WriteString(strconv.Quote(e.Regex)) + sb.WriteString(")") + return sb.String() +} + +// shardableOps lists the operations which may be sharded, but are not +// guaranteed to be. See the `Shardable()` implementations +// on the respective expr types for more details. +// topk, botk, max, & min all must be concatenated and then evaluated in order to avoid +// potential data loss due to series distribution across shards. +// For example, grouping by `cluster` for a `max` operation may yield +// 2 results on the first shard and 10 results on the second. If we prematurely +// calculated `max`s on each shard, the shard/label combination with `2` may be +// discarded and some other combination with `11` may be reported falsely as the max. +// +// Explanation: this is my (owen-d) best understanding. +// +// For an operation to be shardable, first the sample-operation itself must be associative like (+, *) but not (%, /, ^). +// Secondly, if the operation is part of a vector aggregation expression or utilizes logical/set binary ops, +// the vector operation must be distributive over the sample-operation. +// This ensures that the vector merging operation can be applied repeatedly to data in different shards. +// references: +// https://en.wikipedia.org/wiki/Associative_property +// https://en.wikipedia.org/wiki/Distributive_property +var shardableOps = map[string]bool{ + // vector ops + OpTypeSum: true, + // avg is only marked as shardable because we remap it into sum/count. 
+ OpTypeAvg: true, + OpTypeCount: true, + OpTypeMax: true, + OpTypeMin: true, + + // range vector ops + OpRangeTypeAvg: true, + OpRangeTypeCount: true, + OpRangeTypeRate: true, + OpRangeTypeBytes: true, + OpRangeTypeBytesRate: true, + OpRangeTypeSum: true, + OpRangeTypeMax: true, + OpRangeTypeMin: true, + OpRangeTypeQuantile: true, + + // binops - arith + OpTypeAdd: true, + OpTypeMul: true, +} + +type MatcherRange struct { + Matchers []*labels.Matcher + Interval, Offset time.Duration +} + +func MatcherGroups(expr Expr) ([]MatcherRange, error) { + switch e := expr.(type) { + case SampleExpr: + return e.MatcherGroups() + case LogSelectorExpr: + if xs := e.Matchers(); len(xs) > 0 { + return []MatcherRange{ + { + Matchers: xs, + }, + }, nil + } + return nil, nil + default: + return nil, nil + } +} + +type VectorExpr struct { + Val float64 + err error + implicit +} + +func NewVectorExpr(scalar string) *VectorExpr { + n, err := strconv.ParseFloat(scalar, 64) + if err != nil { + err = logqlmodel.NewParseError(fmt.Sprintf("unable to parse vectorExpr as a float: %s", err.Error()), 0, 0) + } + return &VectorExpr{ + Val: n, + err: err, + } +} + +func (e *VectorExpr) isSampleExpr() {} +func (e *VectorExpr) isLogSelectorExpr() {} + +func (e *VectorExpr) Err() error { + return e.err +} + +func (e *VectorExpr) String() string { + var sb strings.Builder + sb.WriteString(OpTypeVector) + sb.WriteString("(") + sb.WriteString(fmt.Sprintf("%f", e.Val)) + sb.WriteString(")") + return sb.String() +} + +func (e *VectorExpr) Value() (float64, error) { + if e.err != nil { + return 0, e.err + } + return e.Val, nil +} + +func (e *VectorExpr) Selector() (LogSelectorExpr, error) { return e, e.err } +func (e *VectorExpr) HasFilter() bool { return false } +func (e *VectorExpr) Shardable() bool { return false } +func (e *VectorExpr) Walk(f WalkFn) { f(e) } +func (e *VectorExpr) Accept(v RootVisitor) { v.VisitVector(e) } +func (e *VectorExpr) Pipeline() (log.Pipeline, error) { return log.NewNoopPipeline(), nil } +func (e *VectorExpr) Matchers() []*labels.Matcher { return nil } +func (e *VectorExpr) MatcherGroups() ([]MatcherRange, error) { return nil, e.err } +func (e *VectorExpr) Extractor() (log.SampleExtractor, error) { return nil, nil } + +func ReducesLabels(e Expr) (conflict bool) { + e.Walk(func(e Expr) { + switch expr := e.(type) { + case *RangeAggregationExpr: + if groupingReducesLabels(expr.Grouping) { + conflict = true + } + case *VectorAggregationExpr: + if groupingReducesLabels(expr.Grouping) { + conflict = true + } + // Technically, any parser that mutates labels could cause the query + // to be non-shardable _if_ the total (inherent+extracted) labels + // exist on two different shards, but this is incredibly unlikely + // for parsers which add new labels so I (owen-d) am preferring + // to continue sharding in those cases and only prevent sharding + // when using `drop` or `keep` which reduce labels to a smaller subset + // more likely to collide across shards. 
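+ // E.g. `drop` can collapse series that lived on different shards into one, + // so shard-local aggregates could double count after merging.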
+ case *KeepLabelsExpr, *DropLabelsExpr: + conflict = true + case *LabelFmtExpr: + // TODO(owen-d): renaming is shardable in many cases, but will + // likely require a `sum without ()` wrapper to combine the + // same extracted labelsets executed on different shards + for _, f := range expr.Formats { + if f.Rename { + conflict = true + break + } + } + default: + return + } + }) + return +} + +func groupingReducesLabels(grp *Grouping) bool { + if grp == nil { + return false + } + + // both without(foo) and by (bar) have the potential + // to reduce labels + if len(grp.Groups) > 0 { + return true + } + + return false +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/syntax/clone.go b/vendor/github.com/grafana/loki/pkg/logql/syntax/clone.go new file mode 100644 index 00000000..07aeb141 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/syntax/clone.go @@ -0,0 +1,298 @@ +package syntax + +import ( + "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/loki/pkg/logql/log" +) + +type cloneVisitor struct { + cloned Expr +} + +var _ RootVisitor = &cloneVisitor{} + +func cloneGrouping(g *Grouping) *Grouping { + copied := &Grouping{ + Without: g.Without, + } + if g.Groups != nil { + copied.Groups = make([]string, len(g.Groups)) + copy(copied.Groups, g.Groups) + } + return copied +} + +func cloneVectorMatching(v *VectorMatching) *VectorMatching { + copied := *v + copy(copied.Include, v.Include) + copy(copied.MatchingLabels, v.MatchingLabels) + + return &copied +} + +func (v *cloneVisitor) VisitBinOp(e *BinOpExpr) { + lhs := MustClone[SampleExpr](e.SampleExpr) + rhs := MustClone[SampleExpr](e.RHS) + copied := &BinOpExpr{ + SampleExpr: lhs, + RHS: rhs, + Op: e.Op, + } + + if e.Opts != nil { + copied.Opts = &BinOpOptions{ + ReturnBool: e.Opts.ReturnBool, + VectorMatching: cloneVectorMatching(e.Opts.VectorMatching), + } + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitVectorAggregation(e *VectorAggregationExpr) { + copied := &VectorAggregationExpr{ + Left: MustClone[SampleExpr](e.Left), + Params: e.Params, + Operation: e.Operation, + } + + if e.Grouping != nil { + copied.Grouping = cloneGrouping(e.Grouping) + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitRangeAggregation(e *RangeAggregationExpr) { + copied := &RangeAggregationExpr{ + Left: MustClone[*LogRange](e.Left), + Operation: e.Operation, + } + + if e.Grouping != nil { + copied.Grouping = cloneGrouping(e.Grouping) + } + + if e.Params != nil { + tmp := *e.Params + copied.Params = &tmp + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitLabelReplace(e *LabelReplaceExpr) { + left := MustClone[SampleExpr](e.Left) + v.cloned = mustNewLabelReplaceExpr(left, e.Dst, e.Replacement, e.Src, e.Regex) +} + +func (v *cloneVisitor) VisitLiteral(e *LiteralExpr) { + v.cloned = &LiteralExpr{Val: e.Val} +} + +func (v *cloneVisitor) VisitVector(e *VectorExpr) { + v.cloned = &VectorExpr{Val: e.Val} +} + +func (v *cloneVisitor) VisitLogRange(e *LogRange) { + copied := &LogRange{ + Left: MustClone[LogSelectorExpr](e.Left), + Interval: e.Interval, + Offset: e.Offset, + } + if e.Unwrap != nil { + copied.Unwrap = &UnwrapExpr{ + Identifier: e.Unwrap.Identifier, + Operation: e.Unwrap.Operation, + } + if e.Unwrap.PostFilters != nil { + copied.Unwrap.PostFilters = make([]log.LabelFilterer, len(e.Unwrap.PostFilters)) + for i, f := range e.Unwrap.PostFilters { + copied.Unwrap.PostFilters[i] = cloneLabelFilterer(f) + } + } + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitMatchers(e *MatchersExpr) { + 
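+	// The matchers are rebuilt with labels.MustNewMatcher rather than copied
+	// by reference, so mutating the clone cannot leak into the original (and
+	// regexp matchers get their own freshly compiled state).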
copied := &MatchersExpr{ + Mts: make([]*labels.Matcher, len(e.Mts)), + } + for i, m := range e.Mts { + copied.Mts[i] = labels.MustNewMatcher(m.Type, m.Name, m.Value) + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitPipeline(e *PipelineExpr) { + copied := &PipelineExpr{ + Left: MustClone[*MatchersExpr](e.Left), + MultiStages: make(MultiStageExpr, len(e.MultiStages)), + } + for i, s := range e.MultiStages { + copied.MultiStages[i] = MustClone[StageExpr](s) + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitDecolorize(*DecolorizeExpr) { + v.cloned = &DecolorizeExpr{} +} + +func (v *cloneVisitor) VisitDropLabels(e *DropLabelsExpr) { + copied := &DropLabelsExpr{ + dropLabels: make([]log.DropLabel, len(e.dropLabels)), + } + for i, l := range e.dropLabels { + var matcher *labels.Matcher + if l.Matcher != nil { + matcher = labels.MustNewMatcher(l.Matcher.Type, l.Matcher.Name, l.Matcher.Value) + } + copied.dropLabels[i] = log.NewDropLabel(matcher, l.Name) + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitJSONExpressionParser(e *JSONExpressionParser) { + copied := &JSONExpressionParser{ + Expressions: make([]log.LabelExtractionExpr, len(e.Expressions)), + } + copy(copied.Expressions, e.Expressions) + + v.cloned = copied +} + +func (v *cloneVisitor) VisitKeepLabel(e *KeepLabelsExpr) { + copied := &KeepLabelsExpr{ + keepLabels: make([]log.KeepLabel, len(e.keepLabels)), + } + for i, k := range e.keepLabels { + copied.keepLabels[i] = log.KeepLabel{ + Name: k.Name, + } + if k.Matcher != nil { + copied.keepLabels[i].Matcher = labels.MustNewMatcher(k.Matcher.Type, k.Matcher.Name, k.Matcher.Value) + } + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitLabelFilter(e *LabelFilterExpr) { + v.cloned = &LabelFilterExpr{ + LabelFilterer: cloneLabelFilterer(e.LabelFilterer), + } +} + +func cloneLabelFilterer(filter log.LabelFilterer) log.LabelFilterer { + switch concrete := filter.(type) { + case *log.BinaryLabelFilter: + return &log.BinaryLabelFilter{ + Left: cloneLabelFilterer(concrete.Left), + Right: cloneLabelFilterer(concrete.Right), + And: concrete.And, + } + case *log.NoopLabelFilter: + copied := &log.NoopLabelFilter{} + if concrete.Matcher != nil { + copied.Matcher = mustNewMatcher(concrete.Type, concrete.Name, concrete.Value) + } + + return copied + case *log.BytesLabelFilter: + return &log.BytesLabelFilter{ + Name: concrete.Name, + Value: concrete.Value, + Type: concrete.Type, + } + case *log.DurationLabelFilter: + return &log.DurationLabelFilter{ + Name: concrete.Name, + Value: concrete.Value, + Type: concrete.Type, + } + case *log.NumericLabelFilter: + return &log.NumericLabelFilter{ + Name: concrete.Name, + Value: concrete.Value, + Type: concrete.Type, + } + case *log.StringLabelFilter: + copied := &log.StringLabelFilter{} + if concrete.Matcher != nil { + copied.Matcher = mustNewMatcher(concrete.Type, concrete.Name, concrete.Value) + } + return copied + case *log.LineFilterLabelFilter: + copied := &log.LineFilterLabelFilter{} + if concrete.Matcher != nil { + copied.Matcher = mustNewMatcher(concrete.Type, concrete.Name, concrete.Value) + } + return copied + case *log.IPLabelFilter: + return log.NewIPLabelFilter(concrete.Pattern, concrete.Label, concrete.Ty) + } + return nil +} + +func (v *cloneVisitor) VisitLabelFmt(e *LabelFmtExpr) { + copied := &LabelFmtExpr{ + Formats: make([]log.LabelFmt, len(e.Formats)), + } + copy(copied.Formats, e.Formats) + v.cloned = copied +} + +func (v *cloneVisitor) VisitLabelParser(e *LabelParserExpr) { + v.cloned = &LabelParserExpr{ 
+		Op:    e.Op,
+		Param: e.Param,
+	}
+}
+
+func (v *cloneVisitor) VisitLineFilter(e *LineFilterExpr) {
+	copied := &LineFilterExpr{
+		Ty:        e.Ty,
+		Match:     e.Match,
+		Op:        e.Op,
+		IsOrChild: e.IsOrChild,
+	}
+
+	if e.Left != nil {
+		copied.Left = MustClone[*LineFilterExpr](e.Left)
+	}
+
+	if e.Or != nil {
+		copied.Or = MustClone[*LineFilterExpr](e.Or)
+	}
+
+	v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitLineFmt(e *LineFmtExpr) {
+	v.cloned = &LineFmtExpr{Value: e.Value}
+}
+
+func (v *cloneVisitor) VisitLogfmtExpressionParser(e *LogfmtExpressionParser) {
+	copied := &LogfmtExpressionParser{
+		Expressions: make([]log.LabelExtractionExpr, len(e.Expressions)),
+		Strict:      e.Strict,
+		KeepEmpty:   e.KeepEmpty,
+	}
+	copy(copied.Expressions, e.Expressions)
+
+	v.cloned = copied
+}
+
+func (v *cloneVisitor) VisitLogfmtParser(e *LogfmtParserExpr) {
+	v.cloned = &LogfmtParserExpr{
+		Strict:    e.Strict,
+		KeepEmpty: e.KeepEmpty,
+	}
+}
diff --git a/vendor/github.com/grafana/loki/pkg/logql/syntax/expr.y b/vendor/github.com/grafana/loki/pkg/logql/syntax/expr.y
new file mode 100644
index 00000000..7e801480
--- /dev/null
+++ b/vendor/github.com/grafana/loki/pkg/logql/syntax/expr.y
@@ -0,0 +1,580 @@
+%{
+package syntax
+
+import (
+	"time"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/grafana/loki/pkg/logql/log"
+
+)
+%}
+
+%union{
+	Expr                    Expr
+	Filter                  labels.MatchType
+	Grouping                *Grouping
+	Labels                  []string
+	LogExpr                 LogSelectorExpr
+	LogRangeExpr            *LogRange
+	Matcher                 *labels.Matcher
+	Matchers                []*labels.Matcher
+	RangeAggregationExpr    SampleExpr
+	RangeOp                 string
+	ConvOp                  string
+	Selector                []*labels.Matcher
+	VectorAggregationExpr   SampleExpr
+	VectorExpr              *VectorExpr
+	Vector                  string
+	MetricExpr              SampleExpr
+	VectorOp                string
+	FilterOp                string
+	BinOpExpr               SampleExpr
+	LabelReplaceExpr        SampleExpr
+	binOp                   string
+	bytes                   uint64
+	str                     string
+	duration                time.Duration
+	LiteralExpr             *LiteralExpr
+	BinOpModifier           *BinOpOptions
+	BoolModifier            *BinOpOptions
+	OnOrIgnoringModifier    *BinOpOptions
+	LabelParser             *LabelParserExpr
+	LogfmtParser            *LogfmtParserExpr
+	LineFilters             *LineFilterExpr
+	LineFilter              *LineFilterExpr
+	OrFilter                *LineFilterExpr
+	ParserFlags             []string
+	PipelineExpr            MultiStageExpr
+	PipelineStage           StageExpr
+	BytesFilter             log.LabelFilterer
+	NumberFilter            log.LabelFilterer
+	DurationFilter          log.LabelFilterer
+	LabelFilter             log.LabelFilterer
+	UnitFilter              log.LabelFilterer
+	IPLabelFilter           log.LabelFilterer
+	LineFormatExpr          *LineFmtExpr
+	LabelFormatExpr         *LabelFmtExpr
+	LabelFormat             log.LabelFmt
+	LabelsFormat            []log.LabelFmt
+
+	LabelExtractionExpression     log.LabelExtractionExpr
+	LabelExtractionExpressionList []log.LabelExtractionExpr
+	JSONExpressionParser          *JSONExpressionParser
+	LogfmtExpressionParser        *LogfmtExpressionParser
+
+	UnwrapExpr              *UnwrapExpr
+	DecolorizeExpr          *DecolorizeExpr
+	OffsetExpr              *OffsetExpr
+	DropLabel               log.DropLabel
+	DropLabels              []log.DropLabel
+	DropLabelsExpr          *DropLabelsExpr
+	KeepLabel               log.KeepLabel
+	KeepLabels              []log.KeepLabel
+	KeepLabelsExpr          *KeepLabelsExpr
+}
+
+%start root
+
+%type <Expr>                  expr
+%type <Filter>                filter
+%type <Grouping>              grouping
+%type <Labels>                labels
+%type <LogExpr>               logExpr
+%type <MetricExpr>            metricExpr
+%type <LogRangeExpr>          logRangeExpr
+%type <Matcher>               matcher
+%type <Matchers>              matchers
+%type <RangeAggregationExpr>  rangeAggregationExpr
+%type <RangeOp>               rangeOp
+%type <ConvOp>                convOp
+%type <Selector>              selector
+%type <VectorAggregationExpr> vectorAggregationExpr
+%type <VectorOp>              vectorOp
+%type <VectorExpr>            vectorExpr
+%type <Vector>                vector
+%type <FilterOp>              filterOp
+%type <BinOpExpr>             binOpExpr
+%type <LiteralExpr>           literalExpr
+%type <LabelReplaceExpr>      labelReplaceExpr
+%type <BinOpModifier>         binOpModifier
+%type <BoolModifier>          boolModifier
+%type <OnOrIgnoringModifier>  onOrIgnoringModifier
+%type <LabelParser>           labelParser
+%type <LogfmtParser>          logfmtParser
+%type <PipelineExpr>          pipelineExpr
+%type <PipelineStage>         pipelineStage
+%type <BytesFilter>           bytesFilter
+%type <NumberFilter>          numberFilter
+%type <DurationFilter>        durationFilter
+%type <LabelFilter>           labelFilter
+%type <LineFilters>           lineFilters
+%type <LineFilter>            lineFilter
+%type <OrFilter>              orFilter
+%type <ParserFlags>           parserFlags
+%type <LineFormatExpr>        lineFormatExpr
+%type <DecolorizeExpr>        decolorizeExpr
+%type <DropLabelsExpr>        dropLabelsExpr
+%type <DropLabels>            dropLabels
+%type <DropLabel>             dropLabel
+%type <KeepLabelsExpr>        keepLabelsExpr
+%type <KeepLabels>            keepLabels
+%type <KeepLabel>             keepLabel
+%type <LabelFormatExpr>       labelFormatExpr
+%type <LabelFormat>           labelFormat
+%type <LabelsFormat>          labelsFormat
+%type <LabelExtractionExpression>     labelExtractionExpression
+%type <LabelExtractionExpressionList> labelExtractionExpressionList
+%type <LogfmtExpressionParser> logfmtExpressionParser
+%type <JSONExpressionParser>  jsonExpressionParser
+%type <UnwrapExpr>            unwrapExpr
+%type <UnitFilter>            unitFilter
+%type <IPLabelFilter>         ipLabelFilter
+%type <OffsetExpr>            offsetExpr
+
+%token <bytes>    BYTES
+%token <str>      IDENTIFIER STRING NUMBER PARSER_FLAG
+%token <duration> DURATION RANGE
+%token            MATCHERS LABELS EQ RE NRE OPEN_BRACE CLOSE_BRACE OPEN_BRACKET CLOSE_BRACKET COMMA DOT PIPE_MATCH PIPE_EXACT
+                  OPEN_PARENTHESIS CLOSE_PARENTHESIS BY WITHOUT COUNT_OVER_TIME RATE RATE_COUNTER SUM SORT SORT_DESC AVG MAX MIN COUNT STDDEV STDVAR BOTTOMK TOPK
+                  BYTES_OVER_TIME BYTES_RATE BOOL JSON REGEXP LOGFMT PIPE LINE_FMT LABEL_FMT UNWRAP AVG_OVER_TIME SUM_OVER_TIME MIN_OVER_TIME
+                  MAX_OVER_TIME STDVAR_OVER_TIME STDDEV_OVER_TIME QUANTILE_OVER_TIME BYTES_CONV DURATION_CONV DURATION_SECONDS_CONV
+                  FIRST_OVER_TIME LAST_OVER_TIME ABSENT_OVER_TIME VECTOR LABEL_REPLACE UNPACK OFFSET PATTERN IP ON IGNORING GROUP_LEFT GROUP_RIGHT
+                  DECOLORIZE DROP KEEP
+
+// Operators are listed with increasing precedence.
+%left OR
+%left AND UNLESS
+%left CMP_EQ NEQ LT LTE GT GTE
+%left ADD SUB
+%left MUL DIV MOD
+%right POW
+
+%%
+
+root: expr { exprlex.(*parser).expr = $1 };
+
+expr:
+      logExpr    { $$ = $1 }
+    | metricExpr { $$ = $1 }
+    ;
+
+metricExpr:
+      rangeAggregationExpr  { $$ = $1 }
+    | vectorAggregationExpr { $$ = $1 }
+    | binOpExpr             { $$ = $1 }
+    | literalExpr           { $$ = $1 }
+    | labelReplaceExpr      { $$ = $1 }
+    | vectorExpr            { $$ = $1 }
+    | OPEN_PARENTHESIS metricExpr CLOSE_PARENTHESIS { $$ = $2 }
+    ;
+
+logExpr:
+      selector              { $$ = newMatcherExpr($1) }
+    | selector pipelineExpr { $$ = newPipelineExpr(newMatcherExpr($1), $2) }
+    | OPEN_PARENTHESIS logExpr CLOSE_PARENTHESIS { $$ = $2 }
+    ;
+
+logRangeExpr:
+      selector RANGE { $$ = newLogRange(newMatcherExpr($1), $2, nil, nil) }
+    | selector RANGE offsetExpr { $$ = newLogRange(newMatcherExpr($1), $2, nil, $3) }
+    | OPEN_PARENTHESIS selector CLOSE_PARENTHESIS RANGE { $$ = newLogRange(newMatcherExpr($2), $4, nil, nil) }
+    | OPEN_PARENTHESIS selector CLOSE_PARENTHESIS RANGE offsetExpr { $$ = newLogRange(newMatcherExpr($2), $4, nil, $5) }
+    | selector RANGE unwrapExpr { $$ = newLogRange(newMatcherExpr($1), $2, $3, nil) }
+    | selector RANGE offsetExpr unwrapExpr { $$ = newLogRange(newMatcherExpr($1), $2, $4, $3) }
+    | OPEN_PARENTHESIS selector CLOSE_PARENTHESIS RANGE unwrapExpr { $$ = newLogRange(newMatcherExpr($2), $4, $5, nil) }
+    | OPEN_PARENTHESIS selector CLOSE_PARENTHESIS RANGE offsetExpr unwrapExpr { $$ = newLogRange(newMatcherExpr($2), $4, $6, $5) }
+    | selector unwrapExpr RANGE { $$ = newLogRange(newMatcherExpr($1), $3, $2, nil) }
+    | selector unwrapExpr RANGE offsetExpr { $$ = newLogRange(newMatcherExpr($1), $3, $2, $4) }
+    | OPEN_PARENTHESIS selector unwrapExpr CLOSE_PARENTHESIS RANGE { $$ = newLogRange(newMatcherExpr($2), $5, $3, nil) }
+    | OPEN_PARENTHESIS selector unwrapExpr CLOSE_PARENTHESIS RANGE offsetExpr { $$ = newLogRange(newMatcherExpr($2), $5, $3, $6) }
+    | selector pipelineExpr RANGE { $$ = newLogRange(newPipelineExpr(newMatcherExpr($1), $2), $3, nil, nil) }
+    | selector pipelineExpr RANGE offsetExpr { $$ = newLogRange(newPipelineExpr(newMatcherExpr($1), $2), $3, nil, $4) }
+ | OPEN_PARENTHESIS selector pipelineExpr CLOSE_PARENTHESIS RANGE { $$ = newLogRange(newPipelineExpr(newMatcherExpr($2), $3), $5, nil, nil ) } + | OPEN_PARENTHESIS selector pipelineExpr CLOSE_PARENTHESIS RANGE offsetExpr { $$ = newLogRange(newPipelineExpr(newMatcherExpr($2), $3), $5, nil, $6 ) } + | selector pipelineExpr unwrapExpr RANGE { $$ = newLogRange(newPipelineExpr(newMatcherExpr($1), $2), $4, $3, nil ) } + | selector pipelineExpr unwrapExpr RANGE offsetExpr { $$ = newLogRange(newPipelineExpr(newMatcherExpr($1), $2), $4, $3, $5 ) } + | OPEN_PARENTHESIS selector pipelineExpr unwrapExpr CLOSE_PARENTHESIS RANGE { $$ = newLogRange(newPipelineExpr(newMatcherExpr($2), $3), $6, $4, nil ) } + | OPEN_PARENTHESIS selector pipelineExpr unwrapExpr CLOSE_PARENTHESIS RANGE offsetExpr { $$ = newLogRange(newPipelineExpr(newMatcherExpr($2), $3), $6, $4, $7 ) } + | selector RANGE pipelineExpr { $$ = newLogRange(newPipelineExpr(newMatcherExpr($1), $3), $2, nil, nil) } + | selector RANGE offsetExpr pipelineExpr { $$ = newLogRange(newPipelineExpr(newMatcherExpr($1), $4), $2, nil, $3 ) } + | selector RANGE pipelineExpr unwrapExpr { $$ = newLogRange(newPipelineExpr(newMatcherExpr($1), $3), $2, $4, nil ) } + | selector RANGE offsetExpr pipelineExpr unwrapExpr { $$ = newLogRange(newPipelineExpr(newMatcherExpr($1), $4), $2, $5, $3 ) } + | OPEN_PARENTHESIS logRangeExpr CLOSE_PARENTHESIS { $$ = $2 } + | logRangeExpr error + ; + +unwrapExpr: + PIPE UNWRAP IDENTIFIER { $$ = newUnwrapExpr($3, "")} + | PIPE UNWRAP convOp OPEN_PARENTHESIS IDENTIFIER CLOSE_PARENTHESIS { $$ = newUnwrapExpr($5, $3)} + | unwrapExpr PIPE labelFilter { $$ = $1.addPostFilter($3) } + ; + +convOp: + BYTES_CONV { $$ = OpConvBytes } + | DURATION_CONV { $$ = OpConvDuration } + | DURATION_SECONDS_CONV { $$ = OpConvDurationSeconds } + ; + +rangeAggregationExpr: + rangeOp OPEN_PARENTHESIS logRangeExpr CLOSE_PARENTHESIS { $$ = newRangeAggregationExpr($3, $1, nil, nil) } + | rangeOp OPEN_PARENTHESIS NUMBER COMMA logRangeExpr CLOSE_PARENTHESIS { $$ = newRangeAggregationExpr($5, $1, nil, &$3) } + | rangeOp OPEN_PARENTHESIS logRangeExpr CLOSE_PARENTHESIS grouping { $$ = newRangeAggregationExpr($3, $1, $5, nil) } + | rangeOp OPEN_PARENTHESIS NUMBER COMMA logRangeExpr CLOSE_PARENTHESIS grouping { $$ = newRangeAggregationExpr($5, $1, $7, &$3) } + ; + +vectorAggregationExpr: + // Aggregations with 1 argument. + vectorOp OPEN_PARENTHESIS metricExpr CLOSE_PARENTHESIS { $$ = mustNewVectorAggregationExpr($3, $1, nil, nil) } + | vectorOp grouping OPEN_PARENTHESIS metricExpr CLOSE_PARENTHESIS { $$ = mustNewVectorAggregationExpr($4, $1, $2, nil,) } + | vectorOp OPEN_PARENTHESIS metricExpr CLOSE_PARENTHESIS grouping { $$ = mustNewVectorAggregationExpr($3, $1, $5, nil) } + // Aggregations with 2 arguments. 
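+                        // The NUMBER argument is the aggregation's scalar parameter,
+                        // e.g. the k in topk(5, rate({app=~".+"}[1m])) (hypothetical
+                        // query), and is threaded through below as &$3 / &$4.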
+ | vectorOp OPEN_PARENTHESIS NUMBER COMMA metricExpr CLOSE_PARENTHESIS { $$ = mustNewVectorAggregationExpr($5, $1, nil, &$3) } + | vectorOp OPEN_PARENTHESIS NUMBER COMMA metricExpr CLOSE_PARENTHESIS grouping { $$ = mustNewVectorAggregationExpr($5, $1, $7, &$3) } + | vectorOp grouping OPEN_PARENTHESIS NUMBER COMMA metricExpr CLOSE_PARENTHESIS { $$ = mustNewVectorAggregationExpr($6, $1, $2, &$4) } + ; + +labelReplaceExpr: + LABEL_REPLACE OPEN_PARENTHESIS metricExpr COMMA STRING COMMA STRING COMMA STRING COMMA STRING CLOSE_PARENTHESIS + { $$ = mustNewLabelReplaceExpr($3, $5, $7, $9, $11)} + ; + +filter: + PIPE_MATCH { $$ = labels.MatchRegexp } + | PIPE_EXACT { $$ = labels.MatchEqual } + | NRE { $$ = labels.MatchNotRegexp } + | NEQ { $$ = labels.MatchNotEqual } + ; + +selector: + OPEN_BRACE matchers CLOSE_BRACE { $$ = $2 } + | OPEN_BRACE matchers error { $$ = $2 } + | OPEN_BRACE CLOSE_BRACE { } + ; + +matchers: + matcher { $$ = []*labels.Matcher{ $1 } } + | matchers COMMA matcher { $$ = append($1, $3) } + ; + +matcher: + IDENTIFIER EQ STRING { $$ = mustNewMatcher(labels.MatchEqual, $1, $3) } + | IDENTIFIER NEQ STRING { $$ = mustNewMatcher(labels.MatchNotEqual, $1, $3) } + | IDENTIFIER RE STRING { $$ = mustNewMatcher(labels.MatchRegexp, $1, $3) } + | IDENTIFIER NRE STRING { $$ = mustNewMatcher(labels.MatchNotRegexp, $1, $3) } + ; + +pipelineExpr: + pipelineStage { $$ = MultiStageExpr{ $1 } } + | pipelineExpr pipelineStage { $$ = append($1, $2)} + ; + +pipelineStage: + lineFilters { $$ = $1 } + | PIPE logfmtParser { $$ = $2 } + | PIPE labelParser { $$ = $2 } + | PIPE jsonExpressionParser { $$ = $2 } + | PIPE logfmtExpressionParser { $$ = $2 } + | PIPE labelFilter { $$ = &LabelFilterExpr{LabelFilterer: $2 }} + | PIPE lineFormatExpr { $$ = $2 } + | PIPE decolorizeExpr { $$ = $2 } + | PIPE labelFormatExpr { $$ = $2 } + | PIPE dropLabelsExpr { $$ = $2 } + | PIPE keepLabelsExpr { $$ = $2 } + ; + +filterOp: + IP { $$ = OpFilterIP } + ; + +orFilter: + STRING { $$ = newLineFilterExpr(labels.MatchEqual, "", $1) } + | filterOp OPEN_PARENTHESIS STRING CLOSE_PARENTHESIS { $$ = newLineFilterExpr(labels.MatchEqual, $1, $3) } + | STRING OR orFilter { $$ = newOrLineFilter(newLineFilterExpr(labels.MatchEqual, "", $1), $3) } + ; + +lineFilter: + filter STRING { $$ = newLineFilterExpr($1, "", $2) } + | filter filterOp OPEN_PARENTHESIS STRING CLOSE_PARENTHESIS { $$ = newLineFilterExpr($1, $2, $4) } + | filter STRING OR orFilter { $$ = newOrLineFilter(newLineFilterExpr($1, "", $2), $4) } + ; + +lineFilters: + lineFilter { $$ = $1 } + | lineFilter OR orFilter { $$ = newOrLineFilter($1, $3)} + | lineFilters lineFilter { $$ = newNestedLineFilterExpr($1, $2) } + ; + +parserFlags: + PARSER_FLAG { $$ = []string{ $1 } } + | parserFlags PARSER_FLAG { $$ = append($1, $2) } + ; + +logfmtParser: + LOGFMT { $$ = newLogfmtParserExpr(nil) } + | LOGFMT parserFlags { $$ = newLogfmtParserExpr($2) } + ; + +labelParser: + JSON { $$ = newLabelParserExpr(OpParserTypeJSON, "") } + | REGEXP STRING { $$ = newLabelParserExpr(OpParserTypeRegexp, $2) } + | UNPACK { $$ = newLabelParserExpr(OpParserTypeUnpack, "") } + | PATTERN STRING { $$ = newLabelParserExpr(OpParserTypePattern, $2) } + ; + +jsonExpressionParser: + JSON labelExtractionExpressionList { $$ = newJSONExpressionParser($2) } + +logfmtExpressionParser: + LOGFMT parserFlags labelExtractionExpressionList { $$ = newLogfmtExpressionParser($3, $2)} + | LOGFMT labelExtractionExpressionList { $$ = newLogfmtExpressionParser($2, nil)} + ; + +lineFormatExpr: LINE_FMT STRING { $$ = 
newLineFmtExpr($2) }; + +decolorizeExpr: DECOLORIZE { $$ = newDecolorizeExpr() }; + +labelFormat: + IDENTIFIER EQ IDENTIFIER { $$ = log.NewRenameLabelFmt($1, $3)} + | IDENTIFIER EQ STRING { $$ = log.NewTemplateLabelFmt($1, $3)} + ; + +labelsFormat: + labelFormat { $$ = []log.LabelFmt{ $1 } } + | labelsFormat COMMA labelFormat { $$ = append($1, $3) } + | labelsFormat COMMA error + ; + +labelFormatExpr: + LABEL_FMT labelsFormat { $$ = newLabelFmtExpr($2) }; + +labelFilter: + matcher { $$ = log.NewStringLabelFilter($1) } + | ipLabelFilter { $$ = $1 } + | unitFilter { $$ = $1 } + | numberFilter { $$ = $1 } + | OPEN_PARENTHESIS labelFilter CLOSE_PARENTHESIS { $$ = $2 } + | labelFilter labelFilter { $$ = log.NewAndLabelFilter($1, $2 ) } + | labelFilter AND labelFilter { $$ = log.NewAndLabelFilter($1, $3 ) } + | labelFilter COMMA labelFilter { $$ = log.NewAndLabelFilter($1, $3 ) } + | labelFilter OR labelFilter { $$ = log.NewOrLabelFilter($1, $3 ) } + ; + +labelExtractionExpression: + IDENTIFIER EQ STRING { $$ = log.NewLabelExtractionExpr($1, $3) } + | IDENTIFIER { $$ = log.NewLabelExtractionExpr($1, $1) } + +labelExtractionExpressionList: + labelExtractionExpression { $$ = []log.LabelExtractionExpr{$1} } + | labelExtractionExpressionList COMMA labelExtractionExpression { $$ = append($1, $3) } + ; + +ipLabelFilter: + IDENTIFIER EQ IP OPEN_PARENTHESIS STRING CLOSE_PARENTHESIS { $$ = log.NewIPLabelFilter($5, $1,log.LabelFilterEqual) } + | IDENTIFIER NEQ IP OPEN_PARENTHESIS STRING CLOSE_PARENTHESIS { $$ = log.NewIPLabelFilter($5, $1, log.LabelFilterNotEqual) } + ; + +unitFilter: + durationFilter { $$ = $1 } + | bytesFilter { $$ = $1 } + +durationFilter: + IDENTIFIER GT DURATION { $$ = log.NewDurationLabelFilter(log.LabelFilterGreaterThan, $1, $3) } + | IDENTIFIER GTE DURATION { $$ = log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, $1, $3) } + | IDENTIFIER LT DURATION { $$ = log.NewDurationLabelFilter(log.LabelFilterLesserThan, $1, $3) } + | IDENTIFIER LTE DURATION { $$ = log.NewDurationLabelFilter(log.LabelFilterLesserThanOrEqual, $1, $3) } + | IDENTIFIER NEQ DURATION { $$ = log.NewDurationLabelFilter(log.LabelFilterNotEqual, $1, $3) } + | IDENTIFIER EQ DURATION { $$ = log.NewDurationLabelFilter(log.LabelFilterEqual, $1, $3) } + | IDENTIFIER CMP_EQ DURATION { $$ = log.NewDurationLabelFilter(log.LabelFilterEqual, $1, $3) } + ; + +bytesFilter: + IDENTIFIER GT BYTES { $$ = log.NewBytesLabelFilter(log.LabelFilterGreaterThan, $1, $3) } + | IDENTIFIER GTE BYTES { $$ = log.NewBytesLabelFilter(log.LabelFilterGreaterThanOrEqual, $1, $3) } + | IDENTIFIER LT BYTES { $$ = log.NewBytesLabelFilter(log.LabelFilterLesserThan, $1, $3) } + | IDENTIFIER LTE BYTES { $$ = log.NewBytesLabelFilter(log.LabelFilterLesserThanOrEqual, $1, $3) } + | IDENTIFIER NEQ BYTES { $$ = log.NewBytesLabelFilter(log.LabelFilterNotEqual, $1, $3) } + | IDENTIFIER EQ BYTES { $$ = log.NewBytesLabelFilter(log.LabelFilterEqual, $1, $3) } + | IDENTIFIER CMP_EQ BYTES { $$ = log.NewBytesLabelFilter(log.LabelFilterEqual, $1, $3) } + ; + +numberFilter: + IDENTIFIER GT NUMBER { $$ = log.NewNumericLabelFilter(log.LabelFilterGreaterThan, $1, mustNewFloat($3))} + | IDENTIFIER GTE NUMBER { $$ = log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, $1, mustNewFloat($3))} + | IDENTIFIER LT NUMBER { $$ = log.NewNumericLabelFilter(log.LabelFilterLesserThan, $1, mustNewFloat($3))} + | IDENTIFIER LTE NUMBER { $$ = log.NewNumericLabelFilter(log.LabelFilterLesserThanOrEqual, $1, mustNewFloat($3))} + | IDENTIFIER NEQ NUMBER { $$ = 
log.NewNumericLabelFilter(log.LabelFilterNotEqual, $1, mustNewFloat($3))} + | IDENTIFIER EQ NUMBER { $$ = log.NewNumericLabelFilter(log.LabelFilterEqual, $1, mustNewFloat($3))} + | IDENTIFIER CMP_EQ NUMBER { $$ = log.NewNumericLabelFilter(log.LabelFilterEqual, $1, mustNewFloat($3))} + ; + +dropLabel: + IDENTIFIER { $$ = log.NewDropLabel(nil, $1) } + | matcher { $$ = log.NewDropLabel($1, "") } + +dropLabels: + dropLabel { $$ = []log.DropLabel{$1}} + | dropLabels COMMA dropLabel { $$ = append($1, $3) } + ; + +dropLabelsExpr: DROP dropLabels { $$ = newDropLabelsExpr($2) } + +keepLabel: + IDENTIFIER { $$ = log.NewKeepLabel(nil, $1) } + | matcher { $$ = log.NewKeepLabel($1, "") } + +keepLabels: + keepLabel { $$ = []log.KeepLabel{$1}} + | keepLabels COMMA keepLabel { $$ = append($1, $3) } + ; + +keepLabelsExpr: KEEP keepLabels { $$ = newKeepLabelsExpr($2) } + +// Operator precedence only works if each of these is listed separately. +binOpExpr: + expr OR binOpModifier expr { $$ = mustNewBinOpExpr("or", $3, $1, $4) } + | expr AND binOpModifier expr { $$ = mustNewBinOpExpr("and", $3, $1, $4) } + | expr UNLESS binOpModifier expr { $$ = mustNewBinOpExpr("unless", $3, $1, $4) } + | expr ADD binOpModifier expr { $$ = mustNewBinOpExpr("+", $3, $1, $4) } + | expr SUB binOpModifier expr { $$ = mustNewBinOpExpr("-", $3, $1, $4) } + | expr MUL binOpModifier expr { $$ = mustNewBinOpExpr("*", $3, $1, $4) } + | expr DIV binOpModifier expr { $$ = mustNewBinOpExpr("/", $3, $1, $4) } + | expr MOD binOpModifier expr { $$ = mustNewBinOpExpr("%", $3, $1, $4) } + | expr POW binOpModifier expr { $$ = mustNewBinOpExpr("^", $3, $1, $4) } + | expr CMP_EQ binOpModifier expr { $$ = mustNewBinOpExpr("==", $3, $1, $4) } + | expr NEQ binOpModifier expr { $$ = mustNewBinOpExpr("!=", $3, $1, $4) } + | expr GT binOpModifier expr { $$ = mustNewBinOpExpr(">", $3, $1, $4) } + | expr GTE binOpModifier expr { $$ = mustNewBinOpExpr(">=", $3, $1, $4) } + | expr LT binOpModifier expr { $$ = mustNewBinOpExpr("<", $3, $1, $4) } + | expr LTE binOpModifier expr { $$ = mustNewBinOpExpr("<=", $3, $1, $4) } + ; + +boolModifier: + { + $$ = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}} + } + | BOOL + { + $$ = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool:true} + } + ; + +onOrIgnoringModifier: + boolModifier ON OPEN_PARENTHESIS labels CLOSE_PARENTHESIS + { + $$ = $1 + $$.VectorMatching.On=true + $$.VectorMatching.MatchingLabels=$4 + } + | boolModifier ON OPEN_PARENTHESIS CLOSE_PARENTHESIS + { + $$ = $1 + $$.VectorMatching.On=true + } + | boolModifier IGNORING OPEN_PARENTHESIS labels CLOSE_PARENTHESIS + { + $$ = $1 + $$.VectorMatching.MatchingLabels=$4 + } + | boolModifier IGNORING OPEN_PARENTHESIS CLOSE_PARENTHESIS + { + $$ = $1 + } + ; + +binOpModifier: + boolModifier {$$ = $1 } + | onOrIgnoringModifier {$$ = $1 } + | onOrIgnoringModifier GROUP_LEFT + { + $$ = $1 + $$.VectorMatching.Card = CardManyToOne + } + | onOrIgnoringModifier GROUP_LEFT OPEN_PARENTHESIS CLOSE_PARENTHESIS + { + $$ = $1 + $$.VectorMatching.Card = CardManyToOne + } + | onOrIgnoringModifier GROUP_LEFT OPEN_PARENTHESIS labels CLOSE_PARENTHESIS + { + $$ = $1 + $$.VectorMatching.Card = CardManyToOne + $$.VectorMatching.Include = $4 + } + | onOrIgnoringModifier GROUP_RIGHT + { + $$ = $1 + $$.VectorMatching.Card = CardOneToMany + } + | onOrIgnoringModifier GROUP_RIGHT OPEN_PARENTHESIS CLOSE_PARENTHESIS + { + $$ = $1 + $$.VectorMatching.Card = CardOneToMany + } + | onOrIgnoringModifier GROUP_RIGHT OPEN_PARENTHESIS labels 
CLOSE_PARENTHESIS + { + $$ = $1 + $$.VectorMatching.Card = CardOneToMany + $$.VectorMatching.Include = $4 + } + ; + +literalExpr: + NUMBER { $$ = mustNewLiteralExpr( $1, false ) } + | ADD NUMBER { $$ = mustNewLiteralExpr( $2, false ) } + | SUB NUMBER { $$ = mustNewLiteralExpr( $2, true ) } + ; + +vectorExpr: + vector OPEN_PARENTHESIS NUMBER CLOSE_PARENTHESIS { $$ = NewVectorExpr( $3 ) } + ; +vector: + VECTOR { $$ = OpTypeVector } + ; + +vectorOp: + SUM { $$ = OpTypeSum } + | AVG { $$ = OpTypeAvg } + | COUNT { $$ = OpTypeCount } + | MAX { $$ = OpTypeMax } + | MIN { $$ = OpTypeMin } + | STDDEV { $$ = OpTypeStddev } + | STDVAR { $$ = OpTypeStdvar } + | BOTTOMK { $$ = OpTypeBottomK } + | TOPK { $$ = OpTypeTopK } + | SORT { $$ = OpTypeSort } + | SORT_DESC { $$ = OpTypeSortDesc } + ; + +rangeOp: + COUNT_OVER_TIME { $$ = OpRangeTypeCount } + | RATE { $$ = OpRangeTypeRate } + | RATE_COUNTER { $$ = OpRangeTypeRateCounter } + | BYTES_OVER_TIME { $$ = OpRangeTypeBytes } + | BYTES_RATE { $$ = OpRangeTypeBytesRate } + | AVG_OVER_TIME { $$ = OpRangeTypeAvg } + | SUM_OVER_TIME { $$ = OpRangeTypeSum } + | MIN_OVER_TIME { $$ = OpRangeTypeMin } + | MAX_OVER_TIME { $$ = OpRangeTypeMax } + | STDVAR_OVER_TIME { $$ = OpRangeTypeStdvar } + | STDDEV_OVER_TIME { $$ = OpRangeTypeStddev } + | QUANTILE_OVER_TIME { $$ = OpRangeTypeQuantile } + | FIRST_OVER_TIME { $$ = OpRangeTypeFirst } + | LAST_OVER_TIME { $$ = OpRangeTypeLast } + | ABSENT_OVER_TIME { $$ = OpRangeTypeAbsent } + ; + +offsetExpr: + OFFSET DURATION { $$ = newOffsetExpr( $2 ) } + +labels: + IDENTIFIER { $$ = []string{ $1 } } + | labels COMMA IDENTIFIER { $$ = append($1, $3) } + ; + +grouping: + BY OPEN_PARENTHESIS labels CLOSE_PARENTHESIS { $$ = &Grouping{ Without: false , Groups: $3 } } + | WITHOUT OPEN_PARENTHESIS labels CLOSE_PARENTHESIS { $$ = &Grouping{ Without: true , Groups: $3 } } + | BY OPEN_PARENTHESIS CLOSE_PARENTHESIS { $$ = &Grouping{ Without: false , Groups: nil } } + | WITHOUT OPEN_PARENTHESIS CLOSE_PARENTHESIS { $$ = &Grouping{ Without: true , Groups: nil } } + ; +%% diff --git a/vendor/github.com/grafana/loki/pkg/logql/syntax/expr.y.go b/vendor/github.com/grafana/loki/pkg/logql/syntax/expr.y.go new file mode 100644 index 00000000..1f38ab57 --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/syntax/expr.y.go @@ -0,0 +1,2020 @@ +// Code generated by goyacc -p expr -o pkg/logql/syntax/expr.y.go pkg/logql/syntax/expr.y. DO NOT EDIT. 
+ +package syntax + +import __yyfmt__ "fmt" + + +import ( + "github.com/grafana/loki/pkg/logql/log" + "github.com/prometheus/prometheus/model/labels" + "time" +) + +type exprSymType struct { + yys int + Expr Expr + Filter labels.MatchType + Grouping *Grouping + Labels []string + LogExpr LogSelectorExpr + LogRangeExpr *LogRange + Matcher *labels.Matcher + Matchers []*labels.Matcher + RangeAggregationExpr SampleExpr + RangeOp string + ConvOp string + Selector []*labels.Matcher + VectorAggregationExpr SampleExpr + VectorExpr *VectorExpr + Vector string + MetricExpr SampleExpr + VectorOp string + FilterOp string + BinOpExpr SampleExpr + LabelReplaceExpr SampleExpr + binOp string + bytes uint64 + str string + duration time.Duration + LiteralExpr *LiteralExpr + BinOpModifier *BinOpOptions + BoolModifier *BinOpOptions + OnOrIgnoringModifier *BinOpOptions + LabelParser *LabelParserExpr + LogfmtParser *LogfmtParserExpr + LineFilters *LineFilterExpr + LineFilter *LineFilterExpr + OrFilter *LineFilterExpr + ParserFlags []string + PipelineExpr MultiStageExpr + PipelineStage StageExpr + BytesFilter log.LabelFilterer + NumberFilter log.LabelFilterer + DurationFilter log.LabelFilterer + LabelFilter log.LabelFilterer + UnitFilter log.LabelFilterer + IPLabelFilter log.LabelFilterer + LineFormatExpr *LineFmtExpr + LabelFormatExpr *LabelFmtExpr + LabelFormat log.LabelFmt + LabelsFormat []log.LabelFmt + + LabelExtractionExpression log.LabelExtractionExpr + LabelExtractionExpressionList []log.LabelExtractionExpr + JSONExpressionParser *JSONExpressionParser + LogfmtExpressionParser *LogfmtExpressionParser + + UnwrapExpr *UnwrapExpr + DecolorizeExpr *DecolorizeExpr + OffsetExpr *OffsetExpr + DropLabel log.DropLabel + DropLabels []log.DropLabel + DropLabelsExpr *DropLabelsExpr + KeepLabel log.KeepLabel + KeepLabels []log.KeepLabel + KeepLabelsExpr *KeepLabelsExpr +} + +const BYTES = 57346 +const IDENTIFIER = 57347 +const STRING = 57348 +const NUMBER = 57349 +const PARSER_FLAG = 57350 +const DURATION = 57351 +const RANGE = 57352 +const MATCHERS = 57353 +const LABELS = 57354 +const EQ = 57355 +const RE = 57356 +const NRE = 57357 +const OPEN_BRACE = 57358 +const CLOSE_BRACE = 57359 +const OPEN_BRACKET = 57360 +const CLOSE_BRACKET = 57361 +const COMMA = 57362 +const DOT = 57363 +const PIPE_MATCH = 57364 +const PIPE_EXACT = 57365 +const OPEN_PARENTHESIS = 57366 +const CLOSE_PARENTHESIS = 57367 +const BY = 57368 +const WITHOUT = 57369 +const COUNT_OVER_TIME = 57370 +const RATE = 57371 +const RATE_COUNTER = 57372 +const SUM = 57373 +const SORT = 57374 +const SORT_DESC = 57375 +const AVG = 57376 +const MAX = 57377 +const MIN = 57378 +const COUNT = 57379 +const STDDEV = 57380 +const STDVAR = 57381 +const BOTTOMK = 57382 +const TOPK = 57383 +const BYTES_OVER_TIME = 57384 +const BYTES_RATE = 57385 +const BOOL = 57386 +const JSON = 57387 +const REGEXP = 57388 +const LOGFMT = 57389 +const PIPE = 57390 +const LINE_FMT = 57391 +const LABEL_FMT = 57392 +const UNWRAP = 57393 +const AVG_OVER_TIME = 57394 +const SUM_OVER_TIME = 57395 +const MIN_OVER_TIME = 57396 +const MAX_OVER_TIME = 57397 +const STDVAR_OVER_TIME = 57398 +const STDDEV_OVER_TIME = 57399 +const QUANTILE_OVER_TIME = 57400 +const BYTES_CONV = 57401 +const DURATION_CONV = 57402 +const DURATION_SECONDS_CONV = 57403 +const FIRST_OVER_TIME = 57404 +const LAST_OVER_TIME = 57405 +const ABSENT_OVER_TIME = 57406 +const VECTOR = 57407 +const LABEL_REPLACE = 57408 +const UNPACK = 57409 +const OFFSET = 57410 +const PATTERN = 57411 +const IP = 57412 +const ON = 57413 +const 
IGNORING = 57414 +const GROUP_LEFT = 57415 +const GROUP_RIGHT = 57416 +const DECOLORIZE = 57417 +const DROP = 57418 +const KEEP = 57419 +const OR = 57420 +const AND = 57421 +const UNLESS = 57422 +const CMP_EQ = 57423 +const NEQ = 57424 +const LT = 57425 +const LTE = 57426 +const GT = 57427 +const GTE = 57428 +const ADD = 57429 +const SUB = 57430 +const MUL = 57431 +const DIV = 57432 +const MOD = 57433 +const POW = 57434 + +var exprToknames = [...]string{ + "$end", + "error", + "$unk", + "BYTES", + "IDENTIFIER", + "STRING", + "NUMBER", + "PARSER_FLAG", + "DURATION", + "RANGE", + "MATCHERS", + "LABELS", + "EQ", + "RE", + "NRE", + "OPEN_BRACE", + "CLOSE_BRACE", + "OPEN_BRACKET", + "CLOSE_BRACKET", + "COMMA", + "DOT", + "PIPE_MATCH", + "PIPE_EXACT", + "OPEN_PARENTHESIS", + "CLOSE_PARENTHESIS", + "BY", + "WITHOUT", + "COUNT_OVER_TIME", + "RATE", + "RATE_COUNTER", + "SUM", + "SORT", + "SORT_DESC", + "AVG", + "MAX", + "MIN", + "COUNT", + "STDDEV", + "STDVAR", + "BOTTOMK", + "TOPK", + "BYTES_OVER_TIME", + "BYTES_RATE", + "BOOL", + "JSON", + "REGEXP", + "LOGFMT", + "PIPE", + "LINE_FMT", + "LABEL_FMT", + "UNWRAP", + "AVG_OVER_TIME", + "SUM_OVER_TIME", + "MIN_OVER_TIME", + "MAX_OVER_TIME", + "STDVAR_OVER_TIME", + "STDDEV_OVER_TIME", + "QUANTILE_OVER_TIME", + "BYTES_CONV", + "DURATION_CONV", + "DURATION_SECONDS_CONV", + "FIRST_OVER_TIME", + "LAST_OVER_TIME", + "ABSENT_OVER_TIME", + "VECTOR", + "LABEL_REPLACE", + "UNPACK", + "OFFSET", + "PATTERN", + "IP", + "ON", + "IGNORING", + "GROUP_LEFT", + "GROUP_RIGHT", + "DECOLORIZE", + "DROP", + "KEEP", + "OR", + "AND", + "UNLESS", + "CMP_EQ", + "NEQ", + "LT", + "LTE", + "GT", + "GTE", + "ADD", + "SUB", + "MUL", + "DIV", + "MOD", + "POW", +} +var exprStatenames = [...]string{} + +const exprEofCode = 1 +const exprErrCode = 2 +const exprInitialStackSize = 16 + + +var exprExca = [...]int{ + -1, 1, + 1, -1, + -2, 0, +} + +const exprPrivate = 57344 + +const exprLast = 592 + +var exprAct = [...]int{ + + 287, 226, 82, 4, 212, 64, 180, 124, 202, 187, + 73, 198, 195, 63, 235, 5, 150, 185, 75, 2, + 56, 78, 48, 49, 50, 57, 58, 61, 62, 59, + 60, 51, 52, 53, 54, 55, 56, 49, 50, 57, + 58, 61, 62, 59, 60, 51, 52, 53, 54, 55, + 56, 57, 58, 61, 62, 59, 60, 51, 52, 53, + 54, 55, 56, 51, 52, 53, 54, 55, 56, 107, + 146, 148, 149, 113, 53, 54, 55, 56, 205, 148, + 149, 281, 215, 137, 290, 154, 164, 165, 213, 293, + 138, 159, 67, 293, 71, 214, 152, 295, 71, 162, + 163, 69, 70, 292, 345, 69, 70, 364, 161, 364, + 92, 384, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 71, 228, 379, 290, + 337, 228, 304, 69, 70, 192, 134, 354, 189, 147, + 200, 204, 83, 84, 372, 367, 211, 206, 209, 210, + 207, 208, 140, 217, 140, 128, 134, 291, 108, 228, + 233, 72, 371, 337, 139, 72, 227, 291, 292, 229, + 230, 182, 344, 238, 222, 128, 120, 121, 119, 225, + 129, 131, 295, 81, 71, 83, 84, 369, 246, 247, + 248, 69, 70, 72, 296, 292, 222, 329, 122, 225, + 123, 292, 250, 357, 71, 292, 130, 132, 133, 347, + 304, 69, 70, 304, 71, 353, 71, 228, 352, 299, + 283, 69, 70, 69, 70, 134, 285, 288, 328, 294, + 181, 297, 237, 107, 300, 113, 301, 228, 71, 289, + 152, 286, 237, 298, 128, 69, 70, 66, 304, 228, + 222, 72, 314, 351, 302, 134, 237, 308, 310, 313, + 315, 316, 312, 241, 200, 204, 323, 318, 322, 290, + 182, 72, 231, 223, 128, 253, 311, 134, 134, 304, + 338, 72, 237, 72, 306, 304, 326, 142, 141, 330, + 305, 332, 334, 182, 336, 107, 128, 128, 361, 335, + 346, 331, 309, 237, 107, 72, 264, 348, 219, 265, + 237, 263, 134, 325, 324, 282, 13, 120, 121, 119, + 245, 129, 131, 239, 153, 244, 382, 
182, 183, 181, + 236, 128, 358, 359, 340, 341, 342, 107, 360, 122, + 243, 123, 242, 216, 362, 363, 158, 130, 132, 133, + 368, 183, 181, 151, 157, 260, 156, 218, 261, 16, + 259, 88, 13, 87, 374, 80, 375, 376, 13, 378, + 153, 255, 262, 350, 251, 303, 6, 257, 380, 256, + 21, 22, 23, 36, 45, 46, 37, 39, 40, 38, + 41, 42, 43, 44, 24, 25, 254, 240, 232, 224, + 252, 79, 234, 377, 26, 27, 28, 29, 30, 31, + 32, 13, 366, 77, 33, 34, 35, 47, 19, 6, + 365, 258, 343, 21, 22, 23, 36, 45, 46, 37, + 39, 40, 38, 41, 42, 43, 44, 24, 25, 17, + 18, 279, 333, 160, 280, 155, 278, 26, 27, 28, + 29, 30, 31, 32, 13, 89, 86, 33, 34, 35, + 47, 19, 6, 320, 321, 383, 21, 22, 23, 36, + 45, 46, 37, 39, 40, 38, 41, 42, 43, 44, + 24, 25, 17, 18, 276, 85, 373, 277, 381, 275, + 26, 27, 28, 29, 30, 31, 32, 370, 356, 355, + 33, 34, 35, 47, 19, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 144, + 188, 125, 327, 249, 273, 17, 18, 274, 270, 272, + 349, 271, 317, 269, 143, 267, 3, 145, 268, 188, + 266, 319, 186, 74, 196, 126, 307, 284, 221, 220, + 219, 218, 193, 191, 190, 203, 199, 188, 79, 196, + 111, 112, 194, 116, 201, 118, 197, 117, 115, 114, + 184, 65, 135, 127, 136, 109, 110, 91, 90, 11, + 10, 9, 20, 12, 15, 8, 339, 14, 7, 76, + 68, 1, +} +var exprPact = [...]int{ + + 352, -1000, -56, -1000, -1000, 199, 352, -1000, -1000, -1000, + -1000, -1000, -1000, 396, 341, 159, -1000, 478, 449, 339, + 337, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 199, -1000, 223, 272, 5, 84, -1000, + -1000, -1000, -1000, 263, 262, -56, 517, -1000, -1000, 57, + 346, 438, 332, 330, 322, -1000, -1000, 352, 436, 352, + 28, 13, -1000, 352, 352, 352, 352, 352, 352, 352, + 352, 352, 352, 352, 352, 352, 352, -1000, -1000, -1000, + -1000, -1000, -1000, 273, -1000, -1000, -1000, -1000, -1000, 534, + 552, 548, -1000, 547, -1000, -1000, -1000, -1000, 220, 546, + -1000, 554, 551, 550, 65, -1000, -1000, 82, 4, 319, + -1000, -1000, -1000, -1000, -1000, 553, 545, 544, 543, 542, + 248, 379, 189, 300, 247, 378, 395, 305, 298, 377, + 238, -42, 318, 316, 301, 296, -30, -30, -15, -15, + -72, -72, -72, -72, -24, -24, -24, -24, -24, -24, + 273, 220, 220, 220, 515, 354, -1000, -1000, 387, 354, + -1000, -1000, 250, -1000, 376, -1000, 358, 359, -1000, 57, + -1000, 357, -1000, 57, -1000, 351, 302, 531, 524, 520, + 480, 437, -1000, 3, 291, 82, 541, -1000, -1000, -1000, + -1000, -1000, -1000, 116, 300, 201, 157, 83, 131, 169, + 194, 116, 352, 229, 355, 265, -1000, -1000, 259, -1000, + 540, -1000, 277, 251, 237, 227, 307, 273, 151, -1000, + 354, 552, 526, -1000, 539, 458, 551, 550, 290, -1000, + -1000, -1000, 289, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 82, 516, -1000, 203, -1000, 172, 111, 55, 111, + 433, 16, 220, 16, 153, 275, 412, 147, 79, -1000, + -1000, 184, -1000, 352, 525, -1000, -1000, 353, 228, -1000, + 193, -1000, -1000, 190, -1000, 112, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 493, 492, -1000, 178, -1000, 116, + 55, 111, 55, -1000, -1000, 273, -1000, 16, -1000, 274, + -1000, -1000, -1000, 61, 410, 402, 120, 116, 162, -1000, + 491, -1000, -1000, -1000, -1000, 137, 119, -1000, -1000, 55, + -1000, 481, 59, 55, 46, 16, 16, 393, -1000, -1000, + 349, -1000, -1000, 103, 55, -1000, -1000, 16, 482, -1000, + -1000, 306, 459, 86, -1000, +} +var 
exprPgo = [...]int{ + + 0, 591, 18, 590, 2, 14, 536, 3, 16, 7, + 589, 588, 587, 586, 15, 585, 584, 583, 582, 95, + 581, 580, 579, 455, 578, 577, 576, 575, 13, 5, + 574, 573, 572, 6, 571, 92, 4, 570, 569, 568, + 567, 566, 11, 565, 564, 8, 563, 12, 562, 9, + 17, 561, 560, 1, 545, 521, 0, +} +var exprR1 = [...]int{ + + 0, 1, 2, 2, 7, 7, 7, 7, 7, 7, + 7, 6, 6, 6, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 53, 53, 53, 13, 13, 13, 11, 11, 11, 11, + 15, 15, 15, 15, 15, 15, 22, 3, 3, 3, + 3, 14, 14, 14, 10, 10, 9, 9, 9, 9, + 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, + 29, 29, 29, 19, 36, 36, 36, 35, 35, 35, + 34, 34, 34, 37, 37, 27, 27, 26, 26, 26, + 26, 52, 51, 51, 38, 39, 47, 47, 48, 48, + 48, 46, 33, 33, 33, 33, 33, 33, 33, 33, + 33, 49, 49, 50, 50, 55, 55, 54, 54, 32, + 32, 32, 32, 32, 32, 32, 30, 30, 30, 30, + 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, + 42, 42, 41, 41, 40, 45, 45, 44, 44, 43, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 24, 24, 25, 25, 25, + 25, 23, 23, 23, 23, 23, 23, 23, 23, 21, + 21, 21, 17, 18, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 12, 12, 12, 12, 12, + 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, + 56, 5, 5, 4, 4, 4, 4, +} +var exprR2 = [...]int{ + + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 3, 1, 2, 3, 2, 3, 4, 5, 3, 4, + 5, 6, 3, 4, 5, 6, 3, 4, 5, 6, + 4, 5, 6, 7, 3, 4, 4, 5, 3, 2, + 3, 6, 3, 1, 1, 1, 4, 6, 5, 7, + 4, 5, 5, 6, 7, 7, 12, 1, 1, 1, + 1, 3, 3, 2, 1, 3, 3, 3, 3, 3, + 1, 2, 1, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 1, 1, 4, 3, 2, 5, 4, + 1, 3, 2, 1, 2, 1, 2, 1, 2, 1, + 2, 2, 3, 2, 2, 1, 3, 3, 1, 3, + 3, 2, 1, 1, 1, 1, 3, 2, 3, 3, + 3, 3, 1, 1, 3, 6, 6, 1, 1, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 1, 1, 1, 3, 2, 1, 1, 1, 3, 2, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 0, 1, 5, 4, 5, + 4, 1, 1, 2, 4, 5, 2, 4, 5, 1, + 2, 2, 4, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 1, 3, 4, 4, 3, 3, +} +var exprChk = [...]int{ + + -1000, -1, -2, -6, -7, -14, 24, -11, -15, -20, + -21, -22, -17, 16, -12, -16, 7, 87, 88, 66, + -18, 28, 29, 30, 42, 43, 52, 53, 54, 55, + 56, 57, 58, 62, 63, 64, 31, 34, 37, 35, + 36, 38, 39, 40, 41, 32, 33, 65, 78, 79, + 80, 87, 88, 89, 90, 91, 92, 81, 82, 85, + 86, 83, 84, -28, -29, -34, 48, -35, -3, 22, + 23, 15, 82, -7, -6, -2, -10, 17, -9, 5, + 24, 24, -4, 26, 27, 7, 7, 24, 24, -23, + -24, -25, 44, -23, -23, -23, -23, -23, -23, -23, + -23, -23, -23, -23, -23, -23, -23, -29, -35, -27, + -26, -52, -51, -33, -38, -39, -46, -40, -43, 47, + 45, 46, 67, 69, -9, -55, -54, -31, 24, 49, + 75, 50, 76, 77, 5, -32, -30, 78, 6, -19, + 70, 25, 25, 17, 2, 20, 13, 82, 14, 15, + -8, 7, -14, 24, -7, 7, 24, 24, 24, -7, + 7, -2, 71, 72, 73, 74, -2, -2, -2, -2, + -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, + -33, 79, 20, 78, -37, -50, 8, -49, 5, -50, + 6, 6, -33, 6, -48, -47, 5, -41, -42, 5, + -9, -44, -45, 5, -9, 13, 82, 85, 86, 83, + 84, 81, -36, 6, -19, 78, 24, -9, 6, 6, + 6, 6, 2, 25, 20, 10, -53, -28, 48, -14, + -8, 25, 20, -7, 7, -5, 25, 5, -5, 25, + 20, 25, 24, 24, 24, 24, -33, -33, -33, 8, + -50, 20, 13, 25, 20, 13, 20, 20, 70, 9, + 4, 7, 70, 9, 4, 7, 9, 4, 7, 9, + 4, 7, 9, 4, 7, 9, 4, 7, 9, 4, + 7, 78, 24, -36, 6, -4, -8, -56, -53, -28, + 68, 10, 48, 10, -53, 51, 25, -53, -28, 25, + -4, -7, 25, 20, 20, 25, 25, 6, -5, 25, + -5, 25, 25, -5, 25, -5, -49, 6, -47, 2, + 5, 6, -42, -45, 24, 24, -36, 6, 25, 25, + -53, -28, -53, 9, -56, -33, -56, 10, 5, -13, + 59, 60, 61, 10, 25, 25, -53, 25, -7, 5, + 20, 25, 25, 25, 25, 6, 6, 25, -4, -53, + 
-56, 24, -56, -53, 48, 10, 10, 25, -4, 25, + 6, 25, 25, 5, -53, -56, -56, 10, 20, 25, + -56, 6, 20, 6, 25, +} +var exprDef = [...]int{ + + 0, -2, 1, 2, 3, 11, 0, 4, 5, 6, + 7, 8, 9, 0, 0, 0, 189, 0, 0, 0, + 0, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 193, 175, 175, + 175, 175, 175, 175, 175, 175, 175, 175, 175, 175, + 175, 175, 175, 12, 70, 72, 0, 90, 0, 57, + 58, 59, 60, 3, 2, 0, 0, 63, 64, 0, + 0, 0, 0, 0, 0, 190, 191, 0, 0, 0, + 181, 182, 176, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 71, 92, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 95, + 97, 0, 99, 0, 112, 113, 114, 115, 0, 0, + 105, 0, 0, 0, 0, 127, 128, 0, 87, 0, + 83, 10, 13, 61, 62, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3, 189, 0, 0, 0, 3, + 0, 160, 0, 0, 183, 186, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 117, 0, 0, 0, 96, 103, 93, 123, 122, 101, + 98, 100, 0, 104, 111, 108, 0, 154, 152, 150, + 151, 159, 157, 155, 156, 0, 0, 0, 0, 0, + 0, 0, 91, 84, 0, 0, 0, 65, 66, 67, + 68, 69, 39, 46, 0, 14, 0, 0, 0, 0, + 0, 50, 0, 3, 189, 0, 225, 221, 0, 226, + 0, 192, 0, 0, 0, 0, 118, 119, 120, 94, + 102, 0, 0, 116, 0, 0, 0, 0, 0, 134, + 141, 148, 0, 133, 140, 147, 129, 136, 143, 130, + 137, 144, 131, 138, 145, 132, 139, 146, 135, 142, + 149, 0, 0, 89, 0, 48, 0, 15, 18, 34, + 0, 22, 0, 26, 0, 0, 0, 0, 0, 38, + 52, 3, 51, 0, 0, 223, 224, 0, 0, 178, + 0, 180, 184, 0, 187, 0, 124, 121, 109, 110, + 106, 107, 153, 158, 0, 0, 86, 0, 88, 47, + 19, 35, 36, 220, 23, 42, 27, 30, 40, 0, + 43, 44, 45, 16, 0, 0, 0, 53, 3, 222, + 0, 177, 179, 185, 188, 0, 0, 85, 49, 37, + 31, 0, 17, 20, 0, 24, 28, 0, 54, 55, + 0, 125, 126, 0, 21, 25, 29, 32, 0, 41, + 33, 0, 0, 0, 56, +} +var exprTok1 = [...]int{ + + 1, +} +var exprTok2 = [...]int{ + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, +} +var exprTok3 = [...]int{ + 0, +} + +var exprErrorMessages = [...]struct { + state int + token int + msg string +}{} + + +/* parser for yacc output */ + +var ( + exprDebug = 0 + exprErrorVerbose = false +) + +type exprLexer interface { + Lex(lval *exprSymType) int + Error(s string) +} + +type exprParser interface { + Parse(exprLexer) int + Lookahead() int +} + +type exprParserImpl struct { + lval exprSymType + stack [exprInitialStackSize]exprSymType + char int +} + +func (p *exprParserImpl) Lookahead() int { + return p.char +} + +func exprNewParser() exprParser { + return &exprParserImpl{} +} + +const exprFlag = -1000 + +func exprTokname(c int) string { + if c >= 1 && c-1 < len(exprToknames) { + if exprToknames[c-1] != "" { + return exprToknames[c-1] + } + } + return __yyfmt__.Sprintf("tok-%v", c) +} + +func exprStatname(s int) string { + if s >= 0 && s < len(exprStatenames) { + if exprStatenames[s] != "" { + return exprStatenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func exprErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !exprErrorVerbose { + return "syntax error" + } + + for _, e := range exprErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + exprTokname(lookAhead) + + // To match Bison, suggest 
at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. + base := exprPact[state] + for tok := TOKSTART; tok-1 < len(exprToknames); tok++ { + if n := base + tok; n >= 0 && n < exprLast && exprChk[exprAct[n]] == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if exprDef[state] == -2 { + i := 0 + for exprExca[i] != -1 || exprExca[i+1] != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; exprExca[i] >= 0; i += 2 { + tok := exprExca[i] + if tok < TOKSTART || exprExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if exprExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += exprTokname(tok) + } + return res +} + +func exprlex1(lex exprLexer, lval *exprSymType) (char, token int) { + token = 0 + char = lex.Lex(lval) + if char <= 0 { + token = exprTok1[0] + goto out + } + if char < len(exprTok1) { + token = exprTok1[char] + goto out + } + if char >= exprPrivate { + if char < exprPrivate+len(exprTok2) { + token = exprTok2[char-exprPrivate] + goto out + } + } + for i := 0; i < len(exprTok3); i += 2 { + token = exprTok3[i+0] + if token == char { + token = exprTok3[i+1] + goto out + } + } + +out: + if token == 0 { + token = exprTok2[1] /* unknown char */ + } + if exprDebug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", exprTokname(token), uint(char)) + } + return char, token +} + +func exprParse(exprlex exprLexer) int { + return exprNewParser().Parse(exprlex) +} + +func (exprrcvr *exprParserImpl) Parse(exprlex exprLexer) int { + var exprn int + var exprVAL exprSymType + var exprDollar []exprSymType + _ = exprDollar // silence set and not used + exprS := exprrcvr.stack[:] + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + exprstate := 0 + exprrcvr.char = -1 + exprtoken := -1 // exprrcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when not parsing. 
+ exprstate = -1 + exprrcvr.char = -1 + exprtoken = -1 + }() + exprp := -1 + goto exprstack + +ret0: + return 0 + +ret1: + return 1 + +exprstack: + /* put a state and value onto the stack */ + if exprDebug >= 4 { + __yyfmt__.Printf("char %v in %v\n", exprTokname(exprtoken), exprStatname(exprstate)) + } + + exprp++ + if exprp >= len(exprS) { + nyys := make([]exprSymType, len(exprS)*2) + copy(nyys, exprS) + exprS = nyys + } + exprS[exprp] = exprVAL + exprS[exprp].yys = exprstate + +exprnewstate: + exprn = exprPact[exprstate] + if exprn <= exprFlag { + goto exprdefault /* simple state */ + } + if exprrcvr.char < 0 { + exprrcvr.char, exprtoken = exprlex1(exprlex, &exprrcvr.lval) + } + exprn += exprtoken + if exprn < 0 || exprn >= exprLast { + goto exprdefault + } + exprn = exprAct[exprn] + if exprChk[exprn] == exprtoken { /* valid shift */ + exprrcvr.char = -1 + exprtoken = -1 + exprVAL = exprrcvr.lval + exprstate = exprn + if Errflag > 0 { + Errflag-- + } + goto exprstack + } + +exprdefault: + /* default state action */ + exprn = exprDef[exprstate] + if exprn == -2 { + if exprrcvr.char < 0 { + exprrcvr.char, exprtoken = exprlex1(exprlex, &exprrcvr.lval) + } + + /* look through exception table */ + xi := 0 + for { + if exprExca[xi+0] == -1 && exprExca[xi+1] == exprstate { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + exprn = exprExca[xi+0] + if exprn < 0 || exprn == exprtoken { + break + } + } + exprn = exprExca[xi+1] + if exprn < 0 { + goto ret0 + } + } + if exprn == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + exprlex.Error(exprErrorMessage(exprstate, exprtoken)) + Nerrs++ + if exprDebug >= 1 { + __yyfmt__.Printf("%s", exprStatname(exprstate)) + __yyfmt__.Printf(" saw %s\n", exprTokname(exprtoken)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for exprp >= 0 { + exprn = exprPact[exprS[exprp].yys] + exprErrCode + if exprn >= 0 && exprn < exprLast { + exprstate = exprAct[exprn] /* simulate a shift of "error" */ + if exprChk[exprstate] == exprErrCode { + goto exprstack + } + } + + /* the current p has no shift on "error", pop stack */ + if exprDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", exprS[exprp].yys) + } + exprp-- + } + /* there is no state on the stack with an error shift ... abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if exprDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", exprTokname(exprtoken)) + } + if exprtoken == exprEofCode { + goto ret1 + } + exprrcvr.char = -1 + exprtoken = -1 + goto exprnewstate /* try again in the same state */ + } + } + + /* reduction by production exprn */ + if exprDebug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", exprn, exprStatname(exprstate)) + } + + exprnt := exprn + exprpt := exprp + _ = exprpt // guard against "declared and not used" + + exprp -= exprR2[exprn] + // exprp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. 
+ if exprp+1 >= len(exprS) { + nyys := make([]exprSymType, len(exprS)*2) + copy(nyys, exprS) + exprS = nyys + } + exprVAL = exprS[exprp+1] + + /* consult goto table to find next state */ + exprn = exprR1[exprn] + exprg := exprPgo[exprn] + exprj := exprg + exprS[exprp].yys + 1 + + if exprj >= exprLast { + exprstate = exprAct[exprg] + } else { + exprstate = exprAct[exprj] + if exprChk[exprstate] != -exprn { + exprstate = exprAct[exprg] + } + } + // dummy call; replaced with literal code + switch exprnt { + + case 1: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprlex.(*parser).expr = exprDollar[1].Expr + } + case 2: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.Expr = exprDollar[1].LogExpr + } + case 3: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.Expr = exprDollar[1].MetricExpr + } + case 4: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.MetricExpr = exprDollar[1].RangeAggregationExpr + } + case 5: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.MetricExpr = exprDollar[1].VectorAggregationExpr + } + case 6: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.MetricExpr = exprDollar[1].BinOpExpr + } + case 7: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.MetricExpr = exprDollar[1].LiteralExpr + } + case 8: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.MetricExpr = exprDollar[1].LabelReplaceExpr + } + case 9: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.MetricExpr = exprDollar[1].VectorExpr + } + case 10: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.MetricExpr = exprDollar[2].MetricExpr + } + case 11: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.LogExpr = newMatcherExpr(exprDollar[1].Selector) + } + case 12: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.LogExpr = newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr) + } + case 13: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LogExpr = exprDollar[2].LogExpr + } + case 14: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, nil, nil) + } + case 15: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, nil, exprDollar[3].OffsetExpr) + } + case 16: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, nil, nil) + } + case 17: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, nil, exprDollar[5].OffsetExpr) + } + case 18: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, exprDollar[3].UnwrapExpr, nil) + } + case 19: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, exprDollar[4].UnwrapExpr, exprDollar[3].OffsetExpr) + } + case 20: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, exprDollar[5].UnwrapExpr, nil) + } + case 21: + exprDollar = exprS[exprpt-6 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, exprDollar[6].UnwrapExpr, exprDollar[5].OffsetExpr) + } + case 22: + 
exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].duration, exprDollar[2].UnwrapExpr, nil) + } + case 23: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].duration, exprDollar[2].UnwrapExpr, exprDollar[4].OffsetExpr) + } + case 24: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[5].duration, exprDollar[3].UnwrapExpr, nil) + } + case 25: + exprDollar = exprS[exprpt-6 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[5].duration, exprDollar[3].UnwrapExpr, exprDollar[6].OffsetExpr) + } + case 26: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[3].duration, nil, nil) + } + case 27: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[3].duration, nil, exprDollar[4].OffsetExpr) + } + case 28: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[5].duration, nil, nil) + } + case 29: + exprDollar = exprS[exprpt-6 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[5].duration, nil, exprDollar[6].OffsetExpr) + } + case 30: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[4].duration, exprDollar[3].UnwrapExpr, nil) + } + case 31: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[4].duration, exprDollar[3].UnwrapExpr, exprDollar[5].OffsetExpr) + } + case 32: + exprDollar = exprS[exprpt-6 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[6].duration, exprDollar[4].UnwrapExpr, nil) + } + case 33: + exprDollar = exprS[exprpt-7 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[6].duration, exprDollar[4].UnwrapExpr, exprDollar[7].OffsetExpr) + } + case 34: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].PipelineExpr), exprDollar[2].duration, nil, nil) + } + case 35: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[4].PipelineExpr), exprDollar[2].duration, nil, exprDollar[3].OffsetExpr) + } + case 36: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].PipelineExpr), exprDollar[2].duration, exprDollar[4].UnwrapExpr, nil) + } + case 37: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), 
exprDollar[4].PipelineExpr), exprDollar[2].duration, exprDollar[5].UnwrapExpr, exprDollar[3].OffsetExpr) + } + case 38: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LogRangeExpr = exprDollar[2].LogRangeExpr + } + case 40: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.UnwrapExpr = newUnwrapExpr(exprDollar[3].str, "") + } + case 41: + exprDollar = exprS[exprpt-6 : exprpt+1] + { + exprVAL.UnwrapExpr = newUnwrapExpr(exprDollar[5].str, exprDollar[3].ConvOp) + } + case 42: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.UnwrapExpr = exprDollar[1].UnwrapExpr.addPostFilter(exprDollar[3].LabelFilter) + } + case 43: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.ConvOp = OpConvBytes + } + case 44: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.ConvOp = OpConvDuration + } + case 45: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.ConvOp = OpConvDurationSeconds + } + case 46: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[3].LogRangeExpr, exprDollar[1].RangeOp, nil, nil) + } + case 47: + exprDollar = exprS[exprpt-6 : exprpt+1] + { + exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[5].LogRangeExpr, exprDollar[1].RangeOp, nil, &exprDollar[3].str) + } + case 48: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[3].LogRangeExpr, exprDollar[1].RangeOp, exprDollar[5].Grouping, nil) + } + case 49: + exprDollar = exprS[exprpt-7 : exprpt+1] + { + exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[5].LogRangeExpr, exprDollar[1].RangeOp, exprDollar[7].Grouping, &exprDollar[3].str) + } + case 50: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].MetricExpr, exprDollar[1].VectorOp, nil, nil) + } + case 51: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[4].MetricExpr, exprDollar[1].VectorOp, exprDollar[2].Grouping, nil) + } + case 52: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].MetricExpr, exprDollar[1].VectorOp, exprDollar[5].Grouping, nil) + } + case 53: + exprDollar = exprS[exprpt-6 : exprpt+1] + { + exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].MetricExpr, exprDollar[1].VectorOp, nil, &exprDollar[3].str) + } + case 54: + exprDollar = exprS[exprpt-7 : exprpt+1] + { + exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].MetricExpr, exprDollar[1].VectorOp, exprDollar[7].Grouping, &exprDollar[3].str) + } + case 55: + exprDollar = exprS[exprpt-7 : exprpt+1] + { + exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[6].MetricExpr, exprDollar[1].VectorOp, exprDollar[2].Grouping, &exprDollar[4].str) + } + case 56: + exprDollar = exprS[exprpt-12 : exprpt+1] + { + exprVAL.LabelReplaceExpr = mustNewLabelReplaceExpr(exprDollar[3].MetricExpr, exprDollar[5].str, exprDollar[7].str, exprDollar[9].str, exprDollar[11].str) + } + case 57: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.Filter = labels.MatchRegexp + } + case 58: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.Filter = labels.MatchEqual + } + case 59: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.Filter = labels.MatchNotRegexp + } + case 60: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.Filter = labels.MatchNotEqual + } + case 
61: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.Selector = exprDollar[2].Matchers + } + case 62: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.Selector = exprDollar[2].Matchers + } + case 63: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + } + case 64: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.Matchers = []*labels.Matcher{exprDollar[1].Matcher} + } + case 65: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.Matchers = append(exprDollar[1].Matchers, exprDollar[3].Matcher) + } + case 66: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.Matcher = mustNewMatcher(labels.MatchEqual, exprDollar[1].str, exprDollar[3].str) + } + case 67: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.Matcher = mustNewMatcher(labels.MatchNotEqual, exprDollar[1].str, exprDollar[3].str) + } + case 68: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.Matcher = mustNewMatcher(labels.MatchRegexp, exprDollar[1].str, exprDollar[3].str) + } + case 69: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.Matcher = mustNewMatcher(labels.MatchNotRegexp, exprDollar[1].str, exprDollar[3].str) + } + case 70: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.PipelineExpr = MultiStageExpr{exprDollar[1].PipelineStage} + } + case 71: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.PipelineExpr = append(exprDollar[1].PipelineExpr, exprDollar[2].PipelineStage) + } + case 72: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.PipelineStage = exprDollar[1].LineFilters + } + case 73: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.PipelineStage = exprDollar[2].LogfmtParser + } + case 74: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.PipelineStage = exprDollar[2].LabelParser + } + case 75: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.PipelineStage = exprDollar[2].JSONExpressionParser + } + case 76: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.PipelineStage = exprDollar[2].LogfmtExpressionParser + } + case 77: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.PipelineStage = &LabelFilterExpr{LabelFilterer: exprDollar[2].LabelFilter} + } + case 78: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.PipelineStage = exprDollar[2].LineFormatExpr + } + case 79: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.PipelineStage = exprDollar[2].DecolorizeExpr + } + case 80: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.PipelineStage = exprDollar[2].LabelFormatExpr + } + case 81: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.PipelineStage = exprDollar[2].DropLabelsExpr + } + case 82: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.PipelineStage = exprDollar[2].KeepLabelsExpr + } + case 83: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.FilterOp = OpFilterIP + } + case 84: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.OrFilter = newLineFilterExpr(labels.MatchEqual, "", exprDollar[1].str) + } + case 85: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.OrFilter = newLineFilterExpr(labels.MatchEqual, exprDollar[1].FilterOp, exprDollar[3].str) + } + case 86: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.OrFilter = newOrLineFilter(newLineFilterExpr(labels.MatchEqual, "", exprDollar[1].str), exprDollar[3].OrFilter) + } + case 87: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.LineFilter = newLineFilterExpr(exprDollar[1].Filter, "", exprDollar[2].str) + } + case 88: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.LineFilter = 
newLineFilterExpr(exprDollar[1].Filter, exprDollar[2].FilterOp, exprDollar[4].str) + } + case 89: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.LineFilter = newOrLineFilter(newLineFilterExpr(exprDollar[1].Filter, "", exprDollar[2].str), exprDollar[4].OrFilter) + } + case 90: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.LineFilters = exprDollar[1].LineFilter + } + case 91: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LineFilters = newOrLineFilter(exprDollar[1].LineFilter, exprDollar[3].OrFilter) + } + case 92: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.LineFilters = newNestedLineFilterExpr(exprDollar[1].LineFilters, exprDollar[2].LineFilter) + } + case 93: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.ParserFlags = []string{exprDollar[1].str} + } + case 94: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.ParserFlags = append(exprDollar[1].ParserFlags, exprDollar[2].str) + } + case 95: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.LogfmtParser = newLogfmtParserExpr(nil) + } + case 96: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.LogfmtParser = newLogfmtParserExpr(exprDollar[2].ParserFlags) + } + case 97: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.LabelParser = newLabelParserExpr(OpParserTypeJSON, "") + } + case 98: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.LabelParser = newLabelParserExpr(OpParserTypeRegexp, exprDollar[2].str) + } + case 99: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.LabelParser = newLabelParserExpr(OpParserTypeUnpack, "") + } + case 100: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.LabelParser = newLabelParserExpr(OpParserTypePattern, exprDollar[2].str) + } + case 101: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.JSONExpressionParser = newJSONExpressionParser(exprDollar[2].LabelExtractionExpressionList) + } + case 102: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LogfmtExpressionParser = newLogfmtExpressionParser(exprDollar[3].LabelExtractionExpressionList, exprDollar[2].ParserFlags) + } + case 103: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.LogfmtExpressionParser = newLogfmtExpressionParser(exprDollar[2].LabelExtractionExpressionList, nil) + } + case 104: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.LineFormatExpr = newLineFmtExpr(exprDollar[2].str) + } + case 105: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.DecolorizeExpr = newDecolorizeExpr() + } + case 106: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LabelFormat = log.NewRenameLabelFmt(exprDollar[1].str, exprDollar[3].str) + } + case 107: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LabelFormat = log.NewTemplateLabelFmt(exprDollar[1].str, exprDollar[3].str) + } + case 108: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.LabelsFormat = []log.LabelFmt{exprDollar[1].LabelFormat} + } + case 109: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LabelsFormat = append(exprDollar[1].LabelsFormat, exprDollar[3].LabelFormat) + } + case 111: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.LabelFormatExpr = newLabelFmtExpr(exprDollar[2].LabelsFormat) + } + case 112: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.LabelFilter = log.NewStringLabelFilter(exprDollar[1].Matcher) + } + case 113: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.LabelFilter = exprDollar[1].IPLabelFilter + } + case 114: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.LabelFilter = 
exprDollar[1].UnitFilter + } + case 115: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.LabelFilter = exprDollar[1].NumberFilter + } + case 116: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LabelFilter = exprDollar[2].LabelFilter + } + case 117: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.LabelFilter = log.NewAndLabelFilter(exprDollar[1].LabelFilter, exprDollar[2].LabelFilter) + } + case 118: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LabelFilter = log.NewAndLabelFilter(exprDollar[1].LabelFilter, exprDollar[3].LabelFilter) + } + case 119: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LabelFilter = log.NewAndLabelFilter(exprDollar[1].LabelFilter, exprDollar[3].LabelFilter) + } + case 120: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LabelFilter = log.NewOrLabelFilter(exprDollar[1].LabelFilter, exprDollar[3].LabelFilter) + } + case 121: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LabelExtractionExpression = log.NewLabelExtractionExpr(exprDollar[1].str, exprDollar[3].str) + } + case 122: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.LabelExtractionExpression = log.NewLabelExtractionExpr(exprDollar[1].str, exprDollar[1].str) + } + case 123: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.LabelExtractionExpressionList = []log.LabelExtractionExpr{exprDollar[1].LabelExtractionExpression} + } + case 124: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.LabelExtractionExpressionList = append(exprDollar[1].LabelExtractionExpressionList, exprDollar[3].LabelExtractionExpression) + } + case 125: + exprDollar = exprS[exprpt-6 : exprpt+1] + { + exprVAL.IPLabelFilter = log.NewIPLabelFilter(exprDollar[5].str, exprDollar[1].str, log.LabelFilterEqual) + } + case 126: + exprDollar = exprS[exprpt-6 : exprpt+1] + { + exprVAL.IPLabelFilter = log.NewIPLabelFilter(exprDollar[5].str, exprDollar[1].str, log.LabelFilterNotEqual) + } + case 127: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.UnitFilter = exprDollar[1].DurationFilter + } + case 128: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.UnitFilter = exprDollar[1].BytesFilter + } + case 129: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, exprDollar[3].duration) + } + case 130: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, exprDollar[3].duration) + } + case 131: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, exprDollar[3].duration) + } + case 132: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, exprDollar[3].duration) + } + case 133: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, exprDollar[3].duration) + } + case 134: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].duration) + } + case 135: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].duration) + } + case 136: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.BytesFilter = 
log.NewBytesLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, exprDollar[3].bytes) + } + case 137: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, exprDollar[3].bytes) + } + case 138: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, exprDollar[3].bytes) + } + case 139: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, exprDollar[3].bytes) + } + case 140: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, exprDollar[3].bytes) + } + case 141: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].bytes) + } + case 142: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].bytes) + } + case 143: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) + } + case 144: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) + } + case 145: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) + } + case 146: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) + } + case 147: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) + } + case 148: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) + } + case 149: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) + } + case 150: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.DropLabel = log.NewDropLabel(nil, exprDollar[1].str) + } + case 151: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.DropLabel = log.NewDropLabel(exprDollar[1].Matcher, "") + } + case 152: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.DropLabels = []log.DropLabel{exprDollar[1].DropLabel} + } + case 153: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.DropLabels = append(exprDollar[1].DropLabels, exprDollar[3].DropLabel) + } + case 154: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.DropLabelsExpr = newDropLabelsExpr(exprDollar[2].DropLabels) + } + case 155: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.KeepLabel = log.NewKeepLabel(nil, exprDollar[1].str) + } + case 156: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.KeepLabel = log.NewKeepLabel(exprDollar[1].Matcher, "") + } + case 157: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.KeepLabels = []log.KeepLabel{exprDollar[1].KeepLabel} + } + case 158: 
+ exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.KeepLabels = append(exprDollar[1].KeepLabels, exprDollar[3].KeepLabel) + } + case 159: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.KeepLabelsExpr = newKeepLabelsExpr(exprDollar[2].KeepLabels) + } + case 160: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr("or", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 161: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr("and", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 162: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr("unless", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 163: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr("+", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 164: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr("-", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 165: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr("*", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 166: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr("/", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 167: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr("%", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 168: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr("^", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 169: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr("==", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 170: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr("!=", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 171: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr(">", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 172: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr(">=", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 173: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr("<", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 174: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpExpr = mustNewBinOpExpr("<=", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) + } + case 175: + exprDollar = exprS[exprpt-0 : exprpt+1] + { + exprVAL.BoolModifier = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}} + } + case 176: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.BoolModifier = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool: true} + } + case 177: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier + exprVAL.OnOrIgnoringModifier.VectorMatching.On = true + exprVAL.OnOrIgnoringModifier.VectorMatching.MatchingLabels = exprDollar[4].Labels + } + case 178: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + 
exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier + exprVAL.OnOrIgnoringModifier.VectorMatching.On = true + } + case 179: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier + exprVAL.OnOrIgnoringModifier.VectorMatching.MatchingLabels = exprDollar[4].Labels + } + case 180: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier + } + case 181: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.BinOpModifier = exprDollar[1].BoolModifier + } + case 182: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier + } + case 183: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier + exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne + } + case 184: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier + exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne + } + case 185: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier + exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne + exprVAL.BinOpModifier.VectorMatching.Include = exprDollar[4].Labels + } + case 186: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier + exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany + } + case 187: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier + exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany + } + case 188: + exprDollar = exprS[exprpt-5 : exprpt+1] + { + exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier + exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany + exprVAL.BinOpModifier.VectorMatching.Include = exprDollar[4].Labels + } + case 189: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[1].str, false) + } + case 190: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[2].str, false) + } + case 191: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[2].str, true) + } + case 192: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.VectorExpr = NewVectorExpr(exprDollar[3].str) + } + case 193: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.Vector = OpTypeVector + } + case 194: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.VectorOp = OpTypeSum + } + case 195: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.VectorOp = OpTypeAvg + } + case 196: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.VectorOp = OpTypeCount + } + case 197: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.VectorOp = OpTypeMax + } + case 198: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.VectorOp = OpTypeMin + } + case 199: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.VectorOp = OpTypeStddev + } + case 200: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.VectorOp = OpTypeStdvar + } + case 201: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.VectorOp = OpTypeBottomK + } + case 202: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.VectorOp = OpTypeTopK + } + case 203: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.VectorOp = OpTypeSort + } + case 204: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.VectorOp = 
OpTypeSortDesc + } + case 205: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeCount + } + case 206: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeRate + } + case 207: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeRateCounter + } + case 208: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeBytes + } + case 209: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeBytesRate + } + case 210: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeAvg + } + case 211: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeSum + } + case 212: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeMin + } + case 213: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeMax + } + case 214: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeStdvar + } + case 215: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeStddev + } + case 216: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeQuantile + } + case 217: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeFirst + } + case 218: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeLast + } + case 219: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.RangeOp = OpRangeTypeAbsent + } + case 220: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.OffsetExpr = newOffsetExpr(exprDollar[2].duration) + } + case 221: + exprDollar = exprS[exprpt-1 : exprpt+1] + { + exprVAL.Labels = []string{exprDollar[1].str} + } + case 222: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.Labels = append(exprDollar[1].Labels, exprDollar[3].str) + } + case 223: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.Grouping = &Grouping{Without: false, Groups: exprDollar[3].Labels} + } + case 224: + exprDollar = exprS[exprpt-4 : exprpt+1] + { + exprVAL.Grouping = &Grouping{Without: true, Groups: exprDollar[3].Labels} + } + case 225: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.Grouping = &Grouping{Without: false, Groups: nil} + } + case 226: + exprDollar = exprS[exprpt-3 : exprpt+1] + { + exprVAL.Grouping = &Grouping{Without: true, Groups: nil} + } + } + goto exprstack /* stack new state and value */ +} diff --git a/vendor/github.com/grafana/loki/pkg/logql/syntax/extractor.go b/vendor/github.com/grafana/loki/pkg/logql/syntax/extractor.go new file mode 100644 index 00000000..922cd25c --- /dev/null +++ b/vendor/github.com/grafana/loki/pkg/logql/syntax/extractor.go @@ -0,0 +1,84 @@ +package syntax + +import ( + "fmt" + "sort" + + "github.com/grafana/loki/pkg/logql/log" +) + +const UnsupportedErr = "unsupported range vector aggregation operation: %s" + +func (r RangeAggregationExpr) Extractor() (log.SampleExtractor, error) { + return r.extractor(nil) +} + +// extractor creates a SampleExtractor but allows for the grouping to be overridden. +func (r RangeAggregationExpr) extractor(override *Grouping) (log.SampleExtractor, error) { + if r.err != nil { + return nil, r.err + } + if err := r.validate(); err != nil { + return nil, err + } + var groups []string + var without bool + var noLabels bool + + // TODO(owen-d|cyriltovena): override grouping (i.e. from a parent `sum`) + // technically can break the query. 
+	// For instance, in `sum by (foo) (max_over_time by (bar) (...))`
+	// the `by (bar)` grouping in the child is ignored in favor of the parent's `by (foo)`
+	for _, grp := range []*Grouping{r.Grouping, override} {
+		if grp != nil {
+			groups = grp.Groups
+			without = grp.Without
+			noLabels = grp.Singleton()
+		}
+	}
+
+	// absent_over_time cannot be grouped (yet?), so set noLabels=true
+	// to make extraction more efficient and less likely to trip per-query series limits.
+	if r.Operation == OpRangeTypeAbsent {
+		noLabels = true
+	}
+
+	sort.Strings(groups)
+
+	var stages []log.Stage
+	if p, ok := r.Left.Left.(*PipelineExpr); ok {
+		// if the expression is a pipeline then take all stages into account first.
+		st, err := p.MultiStages.stages()
+		if err != nil {
+			return nil, err
+		}
+		stages = st
+	}
+	// unwrap...means we want to extract metrics from labels.
+	if r.Left.Unwrap != nil {
+		var convOp string
+		switch r.Left.Unwrap.Operation {
+		case OpConvBytes:
+			convOp = log.ConvertBytes
+		case OpConvDuration, OpConvDurationSeconds:
+			convOp = log.ConvertDuration
+		default:
+			convOp = log.ConvertFloat
+		}
+
+		return log.LabelExtractorWithStages(
+			r.Left.Unwrap.Identifier,
+			convOp, groups, without, noLabels, stages,
+			log.ReduceAndLabelFilter(r.Left.Unwrap.PostFilters),
+		)
+	}
+	// otherwise we extract metrics from the log line.
+	switch r.Operation {
+	case OpRangeTypeRate, OpRangeTypeCount, OpRangeTypeAbsent:
+		return log.NewLineSampleExtractor(log.CountExtractor, stages, groups, without, noLabels)
+	case OpRangeTypeBytes, OpRangeTypeBytesRate:
+		return log.NewLineSampleExtractor(log.BytesExtractor, stages, groups, without, noLabels)
+	default:
+		return nil, fmt.Errorf(UnsupportedErr, r.Operation)
+	}
+}
diff --git a/vendor/github.com/grafana/loki/pkg/logql/syntax/fuzz.go b/vendor/github.com/grafana/loki/pkg/logql/syntax/fuzz.go
new file mode 100644
index 00000000..f6d27926
--- /dev/null
+++ b/vendor/github.com/grafana/loki/pkg/logql/syntax/fuzz.go
@@ -0,0 +1,12 @@
+//go:build gofuzz
+// +build gofuzz
+
+package syntax
+
+func FuzzParseExpr(data []byte) int {
+	_, err := ParseExpr(string(data))
+	if err != nil {
+		return 0
+	}
+	return 1
+}
diff --git a/vendor/github.com/grafana/loki/pkg/logql/syntax/lex.go b/vendor/github.com/grafana/loki/pkg/logql/syntax/lex.go
new file mode 100644
index 00000000..14db797e
--- /dev/null
+++ b/vendor/github.com/grafana/loki/pkg/logql/syntax/lex.go
@@ -0,0 +1,415 @@
+package syntax
+
+import (
+	"strings"
+	"text/scanner"
+	"time"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/dustin/go-humanize"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/util/strutil"
+
+	"github.com/grafana/loki/pkg/logqlmodel"
+)
+
+var tokens = map[string]int{
+	",":            COMMA,
+	".":            DOT,
+	"{":            OPEN_BRACE,
+	"}":            CLOSE_BRACE,
+	"=":            EQ,
+	OpTypeNEQ:      NEQ,
+	"=~":           RE,
+	"!~":           NRE,
+	"|=":           PIPE_EXACT,
+	"|~":           PIPE_MATCH,
+	OpPipe:         PIPE,
+	OpUnwrap:       UNWRAP,
+	"(":            OPEN_PARENTHESIS,
+	")":            CLOSE_PARENTHESIS,
+	"by":           BY,
+	"without":      WITHOUT,
+	"bool":         BOOL,
+	"[":            OPEN_BRACKET,
+	"]":            CLOSE_BRACKET,
+	OpLabelReplace: LABEL_REPLACE,
+	OpOffset:       OFFSET,
+	OpOn:           ON,
+	OpIgnoring:     IGNORING,
+	OpGroupLeft:    GROUP_LEFT,
+	OpGroupRight:   GROUP_RIGHT,
+
+	// binops
+	OpTypeOr:     OR,
+	OpTypeAnd:    AND,
+	OpTypeUnless: UNLESS,
+	OpTypeAdd:    ADD,
+	OpTypeSub:    SUB,
+	OpTypeMul:    MUL,
+	OpTypeDiv:    DIV,
+	OpTypeMod:    MOD,
+	OpTypePow:    POW,
+	// comparison binops
+	OpTypeCmpEQ: CMP_EQ,
+	OpTypeGT:    GT,
+	OpTypeGTE:   GTE,
+	OpTypeLT:    LT,
+	OpTypeLTE:   LTE,
+
+	// parsers
+	OpParserTypeJSON:    JSON,
+	OpParserTypeRegexp:  REGEXP,
+	OpParserTypeLogfmt:  LOGFMT,
+	OpParserTypeUnpack:  UNPACK,
+	OpParserTypePattern: PATTERN,
+
+	// fmt
+	OpFmtLabel: LABEL_FMT,
+	OpFmtLine:  LINE_FMT,
+
+	// filter functions
+	OpFilterIP:   IP,
+	OpDecolorize: DECOLORIZE,
+
+	// drop labels
+	OpDrop: DROP,
+
+	// keep labels
+	OpKeep: KEEP,
+}
+
+var parserFlags = map[string]struct{}{
+	OpStrict:    {},
+	OpKeepEmpty: {},
+}
+
+// functionTokens are tokens that need to be suffixed with parentheses
+var functionTokens = map[string]int{
+	// range vec ops
+	OpRangeTypeRate:        RATE,
+	OpRangeTypeRateCounter: RATE_COUNTER,
+	OpRangeTypeCount:       COUNT_OVER_TIME,
+	OpRangeTypeBytesRate:   BYTES_RATE,
+	OpRangeTypeBytes:       BYTES_OVER_TIME,
+	OpRangeTypeAvg:         AVG_OVER_TIME,
+	OpRangeTypeSum:         SUM_OVER_TIME,
+	OpRangeTypeMin:         MIN_OVER_TIME,
+	OpRangeTypeMax:         MAX_OVER_TIME,
+	OpRangeTypeStdvar:      STDVAR_OVER_TIME,
+	OpRangeTypeStddev:      STDDEV_OVER_TIME,
+	OpRangeTypeQuantile:    QUANTILE_OVER_TIME,
+	OpRangeTypeFirst:       FIRST_OVER_TIME,
+	OpRangeTypeLast:        LAST_OVER_TIME,
+	OpRangeTypeAbsent:      ABSENT_OVER_TIME,
+	OpTypeVector:           VECTOR,
+
+	// vec ops
+	OpTypeSum:      SUM,
+	OpTypeAvg:      AVG,
+	OpTypeMax:      MAX,
+	OpTypeMin:      MIN,
+	OpTypeCount:    COUNT,
+	OpTypeStddev:   STDDEV,
+	OpTypeStdvar:   STDVAR,
+	OpTypeBottomK:  BOTTOMK,
+	OpTypeTopK:     TOPK,
+	OpTypeSort:     SORT,
+	OpTypeSortDesc: SORT_DESC,
+	OpLabelReplace: LABEL_REPLACE,
+
+	// conversion Op
+	OpConvBytes:           BYTES_CONV,
+	OpConvDuration:        DURATION_CONV,
+	OpConvDurationSeconds: DURATION_SECONDS_CONV,
+
+	// filterOp
+	OpFilterIP: IP,
+}
+
+type lexer struct {
+	Scanner
+	errs    []logqlmodel.ParseError
+	builder strings.Builder
+}
+
+func (l *lexer) Lex(lval *exprSymType) int {
+	r := l.Scan()
+
+	switch r {
+	case '#':
+		// Scan until a newline or EOF is encountered
+		//nolint:revive
+		for next := l.Peek(); !(next == '\n' || next == scanner.EOF); next = l.Next() {
+		}
+
+		return l.Lex(lval)
+
+	case scanner.EOF:
+		return 0
+
+	case scanner.Int, scanner.Float:
+		numberText := l.TokenText()
+
+		duration, ok := tryScanDuration(numberText, &l.Scanner)
+		if ok {
+			lval.duration = duration
+			return DURATION
+		}
+
+		bytes, ok := tryScanBytes(numberText, &l.Scanner)
+		if ok {
+			lval.bytes = bytes
+			return BYTES
+		}
+
+		lval.str = numberText
+		return NUMBER
+	case '-': // handle flags and negative durations
+		if l.Peek() == '-' {
+			if flag, ok := tryScanFlag(&l.Scanner); ok {
+				lval.str = flag
+				return PARSER_FLAG
+			}
+		}
+
+		tokenText := l.TokenText()
+		if duration, ok := tryScanDuration(tokenText, &l.Scanner); ok {
+			lval.duration = duration
+			return DURATION
+		}
+
+	case scanner.String, scanner.RawString:
+		var err error
+		tokenText := l.TokenText()
+		if !utf8.ValidString(tokenText) {
+			l.Error("invalid UTF-8 rune")
+			return 0
+		}
+		lval.str, err = strutil.Unquote(tokenText)
+		if err != nil {
+			l.Error(err.Error())
+			return 0
+		}
+		return STRING
+	}
+
+	// scanning range duration tokens, e.g. [5m]
+	if r == '[' {
+		l.builder.Reset()
+		for r := l.Next(); r != scanner.EOF; r = l.Next() {
+			if r == ']' {
+				i, err := model.ParseDuration(l.builder.String())
+				if err != nil {
+					l.Error(err.Error())
+					return 0
+				}
+				lval.duration = time.Duration(i)
+				return RANGE
+			}
+			_, _ = l.builder.WriteRune(r)
+		}
+		l.Error("missing closing ']' in duration")
+		return 0
+	}
+
+	tokenText := l.TokenText()
+	tokenTextLower := strings.ToLower(l.TokenText())
+	tokenNext := strings.ToLower(tokenText + string(l.Peek()))
+
+	if tok, ok := functionTokens[tokenNext]; ok {
+		// create a copy to advance to the entire token for testing suffix
+		sc := l.Scanner
+		sc.Next()
+		if isFunction(sc) {
+			l.Next()
+			return tok
+		}
+	}
+
+	if tok, ok := functionTokens[tokenTextLower]; ok {
+		if !isFunction(l.Scanner) {
+			lval.str = tokenText
+			return IDENTIFIER
+		}
+		return tok
+	}
+
+	if tok, ok := tokens[tokenNext]; ok {
+		l.Next()
+		return tok
+	}
+
+	if tok, ok := tokens[tokenTextLower]; ok {
+		return tok
+	}
+
+	lval.str = tokenText
+	return IDENTIFIER
+}
+
+func (l *lexer) Error(msg string) {
+	l.errs = append(l.errs, logqlmodel.NewParseError(msg, l.Line, l.Column))
+}
+
+// tryScanFlag scans for a parser flag and returns it on success.
+// It advances the scanner only if a valid flag is found.
+func tryScanFlag(l *Scanner) (string, bool) {
+	var sb strings.Builder
+	sb.WriteString(l.TokenText())
+
+	// copy the scanner to avoid advancing it in case it's not a flag
+	s := *l
+	consumed := 0
+	for r := s.Peek(); unicode.IsLetter(r) || r == '-'; r = s.Peek() {
+		_, _ = sb.WriteRune(r)
+		_ = s.Next()
+
+		consumed++
+	}
+
+	flag := sb.String()
+	if _, ok := parserFlags[flag]; !ok {
+		return "", false
+	}
+
+	// consume the scanner
+	for i := 0; i < consumed; i++ {
+		_ = l.Next()
+	}
+
+	return flag, true
+}
+
+func tryScanDuration(number string, l *Scanner) (time.Duration, bool) {
+	var sb strings.Builder
+	sb.WriteString(number)
+	// copy the scanner to avoid advancing it in case it's not a duration.
+	s := *l
+	consumed := 0
+	for r := s.Peek(); r != scanner.EOF && !unicode.IsSpace(r); r = s.Peek() {
+		if !unicode.IsNumber(r) && !isDurationRune(r) && r != '.' {
+			break
+		}
+		_, _ = sb.WriteRune(r)
+		_ = s.Next()
+		consumed++
+	}
+
+	if consumed == 0 {
+		return 0, false
+	}
+	// we've found more characters before a whitespace or the end
+	durationString := sb.String()
+	duration, err := parseDuration(durationString)
+	if err != nil {
+		return 0, false
+	}
+
+	// we need to consume the scanner, now that we know this is a duration.
+	for i := 0; i < consumed; i++ {
+		_ = l.Next()
+	}
+
+	return duration, true
+}
+
+func parseDuration(d string) (time.Duration, error) {
+	var duration time.Duration
+	// Try to parse promql style durations first, to ensure that we support the same duration
+	// units as promql
+	prometheusDuration, err := model.ParseDuration(d)
+	if err != nil {
+		// Fall back to standard library's time.ParseDuration if a promql style
+		// duration couldn't be parsed.
+		duration, err = time.ParseDuration(d)
+		if err != nil {
+			return 0, err
+		}
+	} else {
+		duration = time.Duration(prometheusDuration)
+	}
+
+	return duration, nil
+}
+
+func isDurationRune(r rune) bool {
+	// "ns", "us" (or "µs"), "ms", "s", "m", "h", "d", "w", "y".
+	switch r {
+	case 'n', 'u', 'µ', 'm', 's', 'h', 'd', 'w', 'y':
+		return true
+	default:
+		return false
+	}
+}
+
+func tryScanBytes(number string, l *Scanner) (uint64, bool) {
+	var sb strings.Builder
+	sb.WriteString(number)
+	// copy the scanner to avoid advancing it in case it's not a byte size.
+	s := *l
+	consumed := 0
+	for r := s.Peek(); r != scanner.EOF && !unicode.IsSpace(r); r = s.Peek() {
+		if !unicode.IsNumber(r) && !isBytesSizeRune(r) && r != '.' {
+			break
+		}
+		_, _ = sb.WriteRune(r)
+		_ = s.Next()
+		consumed++
+	}
+
+	if consumed == 0 {
+		return 0, false
+	}
+	// we've found more characters before a whitespace or the end
+	b, err := humanize.ParseBytes(sb.String())
+	if err != nil {
+		return 0, false
+	}
+	// we need to consume the scanner, now that we know this is a byte size.
+	for i := 0; i < consumed; i++ {
+		_ = l.Next()
+	}
+	return b, true
+}
+
+func isBytesSizeRune(r rune) bool {
+	// Accept: B, kB, MB, GB, TB, PB, KB, KiB, MiB, GiB, TiB, PiB
+	// Do not accept: EB, EiB, ZB, ZiB, YB and YiB. They are not supported since the value might not be representable in a uint64.
+	switch r {
+	case 'B', 'i', 'k', 'K', 'M', 'G', 'T', 'P':
+		return true
+	default:
+		return false
+	}
+}
+
+// isFunction checks if the next runes are either an open parenthesis
+// or by/without tokens. This lets us distinguish functions from identifiers correctly.
+func isFunction(sc Scanner) bool {
+	var sb strings.Builder
+	sc = trimSpace(sc)
+	for r := sc.Next(); r != scanner.EOF; r = sc.Next() {
+		sb.WriteRune(r)
+		switch strings.ToLower(sb.String()) {
+		case "(":
+			return true
+		case "by", "without":
+			sc = trimSpace(sc)
+			return sc.Next() == '('
+		}
+	}
+	return false
+}
+
+func trimSpace(l Scanner) Scanner {
+	for n := l.Peek(); n != scanner.EOF; n = l.Peek() {
+		if unicode.IsSpace(n) {
+			l.Next()
+			continue
+		}
+		return l
+	}
+	return l
+}
diff --git a/vendor/github.com/grafana/loki/pkg/logql/syntax/parser.go b/vendor/github.com/grafana/loki/pkg/logql/syntax/parser.go
new file mode 100644
index 00000000..710bf713
--- /dev/null
+++ b/vendor/github.com/grafana/loki/pkg/logql/syntax/parser.go
@@ -0,0 +1,267 @@
+package syntax
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/prometheus/prometheus/model/labels"
+	promql_parser "github.com/prometheus/prometheus/promql/parser"
+
+	"github.com/grafana/loki/pkg/logqlmodel"
+	"github.com/grafana/loki/pkg/util"
+)
+
+const (
+	EmptyMatchers = "{}"
+
+	errAtleastOneEqualityMatcherRequired = "queries require at least one regexp or equality matcher that does not have an empty-compatible value. For instance, app=~\".*\" does not meet this requirement, but app=~\".+\" will"
+)
+
+var parserPool = sync.Pool{
+	New: func() interface{} {
+		p := &parser{
+			p:      &exprParserImpl{},
+			Reader: strings.NewReader(""),
+			lexer:  &lexer{},
+		}
+		return p
+	},
+}
+
+const maxInputSize = 5120
+
+func init() {
+	// Improve the error messages coming out of yacc.
+	exprErrorVerbose = true
+	// uncomment when you need to understand the yacc rule tree.
+	// exprDebug = 3
+	for str, tok := range tokens {
+		exprToknames[tok-exprPrivate+1] = str
+	}
+}
+
+type parser struct {
+	p *exprParserImpl
+	*lexer
+	expr Expr
+	*strings.Reader
+}
+
+func (p *parser) Parse() (Expr, error) {
+	p.lexer.errs = p.lexer.errs[:0]
+	p.lexer.Scanner.Error = func(_ *Scanner, msg string) {
+		p.lexer.Error(msg)
+	}
+	e := p.p.Parse(p)
+	if e != 0 || len(p.lexer.errs) > 0 {
+		return nil, p.lexer.errs[0]
+	}
+	return p.expr, nil
+}
+
+// ParseExpr parses a string and returns an Expr.
+func ParseExpr(input string) (Expr, error) {
+	expr, err := ParseExprWithoutValidation(input)
+	if err != nil {
+		return nil, err
+	}
+	if err := validateExpr(expr); err != nil {
+		return nil, err
+	}
+	return expr, nil
+}
+
+func ParseExprWithoutValidation(input string) (expr Expr, err error) {
+	if len(input) >= maxInputSize {
+		return nil, logqlmodel.NewParseError(fmt.Sprintf("input size too long (%d > %d)", len(input), maxInputSize), 0, 0)
+	}
+
+	defer func() {
+		if r := recover(); r != nil {
+			var ok bool
+			if err, ok = r.(error); ok {
+				if errors.Is(err, logqlmodel.ErrParse) {
+					return
+				}
+				err = logqlmodel.NewParseError(err.Error(), 0, 0)
+			}
+		}
+	}()
+
+	p := parserPool.Get().(*parser)
+	defer parserPool.Put(p)
+
+	p.Reader.Reset(input)
+	p.lexer.Init(p.Reader)
+	return p.Parse()
+}
+
+func MustParseExpr(input string) Expr {
+	expr, err := ParseExpr(input)
+	if err != nil {
+		panic(err)
+	}
+	return expr
+}
+
+func validateExpr(expr Expr) error {
+	switch e := expr.(type) {
+	case SampleExpr:
+		return validateSampleExpr(e)
+	case LogSelectorExpr:
+		return validateLogSelectorExpression(e)
+	default:
+		return logqlmodel.NewParseError(fmt.Sprintf("unexpected expression type: %v", e), 0, 0)
+	}
+}
+
+// validateMatchers checks whether a query would touch all the streams in the query range or uses at least one matcher to select specific streams.
+func validateMatchers(matchers []*labels.Matcher) error {
+	_, matchers = util.SplitFiltersAndMatchers(matchers)
+	if len(matchers) == 0 {
+		return logqlmodel.NewParseError(errAtleastOneEqualityMatcherRequired, 0, 0)
+	}
+	return nil
+}
+
+// ParseMatchers parses a string and returns label matchers; if the expression
+// contains anything else, it returns an error.
+func ParseMatchers(input string, validate bool) ([]*labels.Matcher, error) {
+	var (
+		expr Expr
+		err  error
+	)
+
+	if validate {
+		expr, err = ParseExpr(input)
+	} else {
+		expr, err = ParseExprWithoutValidation(input)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+	matcherExpr, ok := expr.(*MatchersExpr)
+	if !ok {
+		return nil, logqlmodel.ErrParseMatchers
+	}
+	return matcherExpr.Mts, nil
+}
+
+func MatchersString(xs []*labels.Matcher) string {
+	return newMatcherExpr(xs).String()
+}
+
+// ParseSampleExpr parses a string and returns the SampleExpr
+func ParseSampleExpr(input string) (SampleExpr, error) {
+	expr, err := ParseExpr(input)
+	if err != nil {
+		return nil, err
+	}
+	sampleExpr, ok := expr.(SampleExpr)
+	if !ok {
+		return nil, errors.New("only sample expression supported")
+	}
+
+	return sampleExpr, nil
+}
+
+func validateSampleExpr(expr SampleExpr) error {
+	switch e := expr.(type) {
+	case *BinOpExpr:
+		if e.err != nil {
+			return e.err
+		}
+		if err := validateSampleExpr(e.SampleExpr); err != nil {
+			return err
+		}
+		return validateSampleExpr(e.RHS)
+	case *LiteralExpr:
+		if e.err != nil {
+			return e.err
+		}
+		return nil
+	case *VectorExpr:
+		if e.err != nil {
+			return e.err
+		}
+		return nil
+	case *VectorAggregationExpr:
+		if e.err != nil {
+			return e.err
+		}
+		if e.Operation == OpTypeSort || e.Operation == OpTypeSortDesc {
+			if err := validateSortGrouping(e.Grouping); err != nil {
+				return err
+			}
+		}
+		return validateSampleExpr(e.Left)
+	default:
+		selector, err := e.Selector()
+		if err != nil {
+			return err
+		}
+		return validateLogSelectorExpression(selector)
+	}
+}
+
+func validateLogSelectorExpression(expr LogSelectorExpr) error {
+	switch e := expr.(type) {
+	case *VectorExpr:
+		return nil
+	default:
+		return validateMatchers(e.Matchers())
+	}
+}
+
+// validateSortGrouping prevents by|without groupings on sort operations.
+// This keeps compatibility with PromQL; allowing `sort by (foo)` wouldn't make much sense anyway, since sort orders by value rather than by labels.
+func validateSortGrouping(grouping *Grouping) error {
+	if grouping != nil && len(grouping.Groups) > 0 {
+		return logqlmodel.NewParseError("sort and sort_desc doesn't allow grouping by ", 0, 0)
+	}
+	return nil
+}
+
+// ParseLogSelector parses a log selector expression `{app="foo"} |= "filter"`
+func ParseLogSelector(input string, validate bool) (LogSelectorExpr, error) {
+	expr, err := ParseExprWithoutValidation(input)
+	if err != nil {
+		return nil, err
+	}
+	logSelector, ok := expr.(LogSelectorExpr)
+	if !ok {
+		return nil, errors.New("only log selector is supported")
+	}
+	if validate {
+		if err := validateExpr(expr); err != nil {
+			return nil, err
+		}
+	}
+	return logSelector, nil
+}
+
+// ParseLabels parses labels from a string using the logql parser.
+func ParseLabels(lbs string) (labels.Labels, error) {
+	ls, err := promql_parser.ParseMetric(lbs)
+	if err != nil {
+		return nil, err
+	}
+	// Sort labels to ensure functionally equivalent
+	// inputs map to the same output
+	sort.Sort(ls)
+
+	// Use the label builder to trim empty label values.
+	// Empty label values are equivalent to absent labels
+	// in Prometheus, but they unfortunately alter the
+	// Hash values created. This can cause problems in Loki
+	// if we can't rely on a set of labels to have a deterministic
+	// hash value.
+	// Therefore we must normalize early in the write path.
+	// See https://github.com/grafana/loki/pull/7355
+	// for more information
+	return labels.NewBuilder(ls).Labels(), nil
+}
diff --git a/vendor/github.com/grafana/loki/pkg/logql/syntax/prettier.go b/vendor/github.com/grafana/loki/pkg/logql/syntax/prettier.go
new file mode 100644
index 00000000..cf346e26
--- /dev/null
+++ b/vendor/github.com/grafana/loki/pkg/logql/syntax/prettier.go
@@ -0,0 +1,422 @@
+// The LogQL formatter is inspired by the PromQL formatter
+// https://github.com/prometheus/prometheus/blob/release-2.40/promql/parser/prettier.go
+// https://youtu.be/pjkWzDVxWk4?t=24469
+
+package syntax
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/common/model"
+)
+
+// How the LogQL formatter works
+// =============================
+// The general idea is to parse the LogQL query (a string) into an AST (expressions) first, then format each expression from the bottom up (from leaf expressions to the root expression). Every expression in the AST has a level/depth (its distance from the root) that is passed down by its parent.
+//
+// While prettifying an expression, we consider two things:
+// 1. Did the current expression's parent add a new line?
+// 2. Does the current expression exceed the `maxCharsPerLine` limit?
+//
+// The level of an expression determines whether it should be indented or not.
+// The answer to 1 is NO if the level passed is 0. This means the
+// parent expression did not apply a new line, so the current node must not
+// apply any indentation as a prefix.
+// If level > 1, a new line was applied by the parent. So, the current expression
+// should prefix an indentation before writing any of its content. This indentation
+// will be ([level/depth of current expression] * " ").
+//
+// The answer to 2 is YES if the normalized length of the current expression exceeds
+// the `maxCharsPerLine` limit. Hence, it applies an indentation equal to
+// its depth and increments the level by 1 before passing it down to the child.
+// If the answer is NO, the current expression returns its normalized string value.
+//
+
+var (
+	// maxCharsPerLine is used to qualify whether some LogQL expressions are worth `splitting` into new lines.
+	maxCharsPerLine = 100
+)
+
+func Prettify(e Expr) string {
+	return e.Pretty(0)
+}
+
+// e.g: `{foo="bar"}`
+func (e *MatchersExpr) Pretty(level int) string {
+	return commonPrefixIndent(level, e)
+}
+
+// e.g: `{foo="bar"} | logfmt | level="error"`
+// Here, left = `{foo="bar"}` and MultiStages would be the collection of each stage in the pipeline, here `logfmt` and `level="error"`
+func (e *PipelineExpr) Pretty(level int) string {
+	if !needSplit(e) {
+		return indent(level) + e.String()
+	}
+
+	s := fmt.Sprintf("%s\n", e.Left.Pretty(level))
+	for i, ms := range e.MultiStages {
+		s += ms.Pretty(level + 1)
+		// NOTE: Needed because we format multiple stages in a pipeline with each stage on its own line,
+		// e.g:
+		// | logfmt
+		// | level = "error"
+		// But all the stages have the same indent level, so we don't increase the level here.
+		if i < len(e.MultiStages)-1 {
+			s += "\n"
+		}
+	}
+	return s
+}
+
+// e.g: `|= "error" != "memcache" |= ip("192.168.0.1")`
+// NOTE: here `ip` is the Op in this expression.
+func (e *LineFilterExpr) Pretty(level int) string {
+	if !needSplit(e) {
+		return indent(level) + e.String()
+	}
+
+	var s string
+
+	if e.Left != nil {
+		// s += indent(level)
+		s += e.Left.Pretty(level)
+		// NOTE: Similar to PipelineExpr, we also have to format every LineFilterExpr on a new line, but with the same indentation level.
+		// e.g:
+		// |= "error"
+		// != "memcached"
+		// |= ip("192.168.0.1")
+		s += "\n"
+	}
+
+	s += indent(level)
+
+	// We re-use LineFilterExpr's String() implementation to avoid duplication.
+	// We create a new LineFilterExpr without `Left`.
+	ne := newLineFilterExpr(e.Ty, e.Op, e.Match)
+	s += ne.String()
+
+	return s
+}
+
+func (e *LogfmtParserExpr) Pretty(level int) string {
+	return commonPrefixIndent(level, e)
+}
+
+// e.g:
+// `| json`
+// `| regexp`
+// `| pattern`
+// `| unpack`
+func (e *LabelParserExpr) Pretty(level int) string {
+	return commonPrefixIndent(level, e)
+}
+
+func (e *DropLabelsExpr) Pretty(level int) string {
+	return commonPrefixIndent(level, e)
+}
+
+func (e *KeepLabelsExpr) Pretty(level int) string {
+	return commonPrefixIndent(level, e)
+}
+
+// e.g: | level!="error"
+func (e *LabelFilterExpr) Pretty(level int) string {
+	return commonPrefixIndent(level, e)
+}
+
+// e.g: | line_format "{{ .label }}"
+func (e *LineFmtExpr) Pretty(level int) string {
+	return commonPrefixIndent(level, e)
+}
+
+// e.g: | decolorize
+func (e *DecolorizeExpr) Pretty(_ int) string {
+	return e.String()
+}
+
+// e.g: | label_format dst="{{ .src }}"
+func (e *LabelFmtExpr) Pretty(level int) string {
+	return commonPrefixIndent(level, e)
+}
+
+// e.g: | json label="expression", another="expression"
+func (e *JSONExpressionParser) Pretty(level int) string {
+	return commonPrefixIndent(level, e)
+}
+
+// e.g: | logfmt label="expression", another="expression"
+func (e *LogfmtExpressionParser) Pretty(level int) string {
+	return commonPrefixIndent(level, e)
+}
+
+// e.g: sum_over_time({foo="bar"} | logfmt | unwrap bytes_processed [5m])
+func (e *UnwrapExpr) Pretty(level int) string {
+	s := indent(level)
+
+	if e.Operation != "" {
+		s += fmt.Sprintf("%s %s %s(%s)", OpPipe, OpUnwrap, e.Operation, e.Identifier)
+	} else {
+		s += fmt.Sprintf("%s %s %s", OpPipe, OpUnwrap, e.Identifier)
+	}
+	for _, f := range e.PostFilters {
+		s += fmt.Sprintf("\n%s%s %s", indent(level), OpPipe, f)
+	}
+	return s
+}
+
+// e.g: `{foo="bar"}|logfmt[5m]`
+// TODO(kavi): Rename `LogRange` -> `LogRangeExpr` (to be consistent with other expressions?)
+func (e *LogRange) Pretty(level int) string {
+	s := e.Left.Pretty(level)
+
+	if e.Unwrap != nil {
+		// NOTE: | unwrap should go on a new line
+		s += "\n"
+		s += e.Unwrap.Pretty(level + 1)
+	}
+
+	// TODO: this puts [1m] on the same line, not on a new line as people are used to.
+	s = fmt.Sprintf("%s [%s]", s, model.Duration(e.Interval))
+
+	if e.Offset != 0 {
+		oe := OffsetExpr{Offset: e.Offset}
+		s += oe.Pretty(level)
+	}
+
+	return s
+}
+
+// e.g: count_over_time({foo="bar"}[5m] offset 3h)
+// TODO(kavi): why does offset not work in log queries? e.g: `{foo="bar"} offset 1h`? Is it a bug, or something else?
+// NOTE: Also, an offset expression is never indented. It always goes with its parent expression (usually a RangeExpr).
+func (e *OffsetExpr) Pretty(_ int) string {
+	// using `model.Duration` as it can format while omitting zero units.
+	// e.g: time.Duration(2 * Hour) -> "2h0m0s"
+	// but model.Duration(2 * Hour) -> "2h"
+	return fmt.Sprintf(" %s %s", OpOffset, model.Duration(e.Offset))
+}
+
+// e.g: count_over_time({foo="bar"}[5m])
+func (e *RangeAggregationExpr) Pretty(level int) string {
+	s := indent(level)
+	if !needSplit(e) {
+		return s + e.String()
+	}
+
+	s += e.Operation // e.g: quantile_over_time
+
+	s += "(\n"
+
+	// print args to the function.
+	if e.Params != nil {
+		s = fmt.Sprintf("%s%s%s,", s, indent(level+1), fmt.Sprint(*e.Params))
+		s += "\n"
+	}
+
+	s += e.Left.Pretty(level + 1)
+
+	s += "\n" + indent(level) + ")"
+
+	if e.Grouping != nil {
+		s += e.Grouping.Pretty(level)
+	}
+
+	return s
+}
+
+// e.g:
+// sum(count_over_time({foo="bar"}[5m])) by (container)
+// topk(10, count_over_time({foo="bar"}[5m])) by (container)
+
+// Syntax: <aggr-op>([parameter,] <vector expression>) [without|by (<label list>)]
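As a quick orientation for the prettifier above, here is a minimal usage sketch; the query is an arbitrary example and the snippet relies only on the `MustParseExpr` and `Prettify` functions defined in this vendored package:

package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/syntax"
)

func main() {
	// Parse a metric query, then pretty-print it starting at level 0.
	// Any expression whose normalized form exceeds maxCharsPerLine (100)
	// is split across indented lines, as described in the comments above.
	expr := syntax.MustParseExpr(
		`sum by (container) (quantile_over_time(0.99, {app="foo"} | logfmt | unwrap latency [5m]))`,
	)
	fmt.Println(syntax.Prettify(expr))
}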
+    {{ if .Form.Action }}
+
+
+    {{ range .Form.Inputs }}
+
+    {{ end }}
+
+
+
    + {{ end }} + {{ .ExtraHTML }} diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go index 47bbca17..61383bce 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go +++ b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go @@ -33,6 +33,7 @@ import ( var ( errNoTLSConfig = errors.New("TLS config is not present") + ErrNoListeners = errors.New("no web listen address or systemd socket flag specified") ) type Config struct { @@ -42,15 +43,19 @@ type Config struct { } type TLSConfig struct { - TLSCertPath string `yaml:"cert_file"` - TLSKeyPath string `yaml:"key_file"` - ClientAuth string `yaml:"client_auth_type"` - ClientCAs string `yaml:"client_ca_file"` - CipherSuites []Cipher `yaml:"cipher_suites"` - CurvePreferences []Curve `yaml:"curve_preferences"` - MinVersion TLSVersion `yaml:"min_version"` - MaxVersion TLSVersion `yaml:"max_version"` - PreferServerCipherSuites bool `yaml:"prefer_server_cipher_suites"` + TLSCert string `yaml:"cert"` + TLSKey config_util.Secret `yaml:"key"` + ClientCAsText string `yaml:"client_ca"` + TLSCertPath string `yaml:"cert_file"` + TLSKeyPath string `yaml:"key_file"` + ClientAuth string `yaml:"client_auth_type"` + ClientCAs string `yaml:"client_ca_file"` + CipherSuites []Cipher `yaml:"cipher_suites"` + CurvePreferences []Curve `yaml:"curve_preferences"` + MinVersion TLSVersion `yaml:"min_version"` + MaxVersion TLSVersion `yaml:"max_version"` + PreferServerCipherSuites bool `yaml:"prefer_server_cipher_suites"` + ClientAllowedSans []string `yaml:"client_allowed_sans"` } type FlagConfig struct { @@ -66,6 +71,36 @@ func (t *TLSConfig) SetDirectory(dir string) { t.ClientCAs = config_util.JoinDir(dir, t.ClientCAs) } +// VerifyPeerCertificate will check the SAN entries of the client cert if there is configuration for it +func (t *TLSConfig) VerifyPeerCertificate(rawCerts [][]byte, _ [][]*x509.Certificate) error { + // sender cert comes first, see https://www.rfc-editor.org/rfc/rfc5246#section-7.4.2 + cert, err := x509.ParseCertificate(rawCerts[0]) + if err != nil { + return fmt.Errorf("error parsing client certificate: %s", err) + } + + // Build up a slice of strings with all Subject Alternate Name values + sanValues := append(cert.DNSNames, cert.EmailAddresses...) + + for _, ip := range cert.IPAddresses { + sanValues = append(sanValues, ip.String()) + } + + for _, uri := range cert.URIs { + sanValues = append(sanValues, uri.String()) + } + + for _, sanValue := range sanValues { + for _, allowedSan := range t.ClientAllowedSans { + if sanValue == allowedSan { + return nil + } + } + } + + return fmt.Errorf("could not find allowed SANs in client cert, found: %v", t.ClientAllowedSans) +} + type HTTPConfig struct { HTTP2 bool `yaml:"http2"` Header map[string]string `yaml:"headers,omitempty"` @@ -100,22 +135,54 @@ func getTLSConfig(configPath string) (*tls.Config, error) { return ConfigToTLSConfig(&c.TLSConfig) } -// ConfigToTLSConfig generates the golang tls.Config from the TLSConfig struct. 
-func ConfigToTLSConfig(c *TLSConfig) (*tls.Config, error) { - if c.TLSCertPath == "" && c.TLSKeyPath == "" && c.ClientAuth == "" && c.ClientCAs == "" { - return nil, errNoTLSConfig +func validateTLSPaths(c *TLSConfig) error { + if c.TLSCertPath == "" && c.TLSCert == "" && + c.TLSKeyPath == "" && c.TLSKey == "" && + c.ClientCAs == "" && c.ClientCAsText == "" && + c.ClientAuth == "" { + return errNoTLSConfig } - if c.TLSCertPath == "" { - return nil, errors.New("missing cert_file") + if c.TLSCertPath == "" && c.TLSCert == "" { + return errors.New("missing one of cert or cert_file") } - if c.TLSKeyPath == "" { - return nil, errors.New("missing key_file") + if c.TLSKeyPath == "" && c.TLSKey == "" { + return errors.New("missing one of key or key_file") + } + + return nil +} + +// ConfigToTLSConfig generates the golang tls.Config from the TLSConfig struct. +func ConfigToTLSConfig(c *TLSConfig) (*tls.Config, error) { + if err := validateTLSPaths(c); err != nil { + return nil, err } loadCert := func() (*tls.Certificate, error) { - cert, err := tls.LoadX509KeyPair(c.TLSCertPath, c.TLSKeyPath) + var certData, keyData []byte + var err error + + if c.TLSCertPath != "" { + certData, err = os.ReadFile(c.TLSCertPath) + if err != nil { + return nil, fmt.Errorf("failed to read cert_file (%s): %s", c.TLSCertPath, err) + } + } else { + certData = []byte(c.TLSCert) + } + + if c.TLSKeyPath != "" { + keyData, err = os.ReadFile(c.TLSKeyPath) + if err != nil { + return nil, fmt.Errorf("failed to read key_file (%s): %s", c.TLSKeyPath, err) + } + } else { + keyData = []byte(c.TLSKey) + } + + cert, err := tls.X509KeyPair(certData, keyData) if err != nil { return nil, fmt.Errorf("failed to load X509KeyPair: %w", err) } @@ -161,6 +228,15 @@ func ConfigToTLSConfig(c *TLSConfig) (*tls.Config, error) { } clientCAPool.AppendCertsFromPEM(clientCAFile) cfg.ClientCAs = clientCAPool + } else if c.ClientCAsText != "" { + clientCAPool := x509.NewCertPool() + clientCAPool.AppendCertsFromPEM([]byte(c.ClientCAsText)) + cfg.ClientCAs = clientCAPool + } + + if c.ClientAllowedSans != nil { + // verify that the client cert contains an allowed SAN + cfg.VerifyPeerCertificate = c.VerifyPeerCertificate } switch c.ClientAuth { @@ -178,7 +254,7 @@ func ConfigToTLSConfig(c *TLSConfig) (*tls.Config, error) { return nil, errors.New("Invalid ClientAuth: " + c.ClientAuth) } - if c.ClientCAs != "" && cfg.ClientAuth == tls.NoClientCert { + if (c.ClientCAs != "" || c.ClientCAsText != "") && cfg.ClientAuth == tls.NoClientCert { return nil, errors.New("Client CA's have been configured without a Client Auth Policy") } @@ -203,7 +279,11 @@ func ServeMultiple(listeners []net.Listener, server *http.Server, flags *FlagCon // WebSystemdSocket in the FlagConfig is true. The FlagConfig is also passed on // to ServeMultiple. 
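And a sketch of the stricter listener contract enforced below: `WebSystemdSocket` and `WebListenAddresses` are now nil-checked, and a fully empty `FlagConfig` yields `ErrNoListeners` instead of a nil-pointer dereference. Flag values here are placeholders, and the `WebConfigFile` field is assumed from the toolkit's `FlagConfig`, whose body is elided in this diff:

```go
package main

import (
	"net/http"
	"os"

	"github.com/go-kit/log"
	"github.com/prometheus/exporter-toolkit/web"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	systemd := false
	addrs := []string{":9100"}
	webConfig := "" // no web config file; serve plain HTTP
	flags := &web.FlagConfig{
		WebSystemdSocket:   &systemd,
		WebListenAddresses: &addrs,
		WebConfigFile:      &webConfig,
	}

	server := &http.Server{Handler: http.DefaultServeMux}
	// A FlagConfig with neither listener source set now fails fast with
	// web.ErrNoListeners rather than panicking on *flags.WebSystemdSocket.
	if err := web.ListenAndServe(server, flags, logger); err != nil {
		logger.Log("err", err)
	}
}
```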
func ListenAndServe(server *http.Server, flags *FlagConfig, logger log.Logger) error { - if *flags.WebSystemdSocket { + if flags.WebSystemdSocket == nil && (flags.WebListenAddresses == nil || len(*flags.WebListenAddresses) == 0) { + return ErrNoListeners + } + + if flags.WebSystemdSocket != nil && *flags.WebSystemdSocket { level.Info(logger).Log("msg", "Listening on systemd activated listeners instead of port listeners.") listeners, err := activation.Listeners() if err != nil { @@ -214,6 +294,7 @@ func ListenAndServe(server *http.Server, flags *FlagConfig, logger log.Logger) e } return ServeMultiple(listeners, server, flags, logger) } + listeners := make([]net.Listener, 0, len(*flags.WebListenAddresses)) for _, address := range *flags.WebListenAddresses { listener, err := net.Listen("tcp", address) diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index a197699a..c24864a9 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -2,6 +2,7 @@ linters: enable: - godot + - misspell - revive linter-settings: @@ -10,3 +11,5 @@ linter-settings: exclude: # Ignore "See: URL" - 'See:' + misspell: + locale: US diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index e358db69..b111d256 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.49.0 +GOLANGCI_LINT_VERSION ?= v1.51.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. 
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) +SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) + ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) # Only supported on amd64 @@ -205,7 +207,7 @@ common-tarball: promu .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ @@ -214,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%: .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" .PHONY: common-docker-manifest common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 43c37735..1224816c 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -51,11 +51,11 @@ ensure the `fixtures` directory is up to date by removing the existing directory extracting the ttar file using `make fixtures/.unpacked` or just `make test`. ```bash -rm -rf fixtures +rm -rf testdata/fixtures make test ``` Next, make the required changes to the extracted files in the `fixtures` directory. 
When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using -`git diff fixtures.ttar`. +`git diff testdata/fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 68f36e88..28783e2d 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -55,7 +55,7 @@ type ARPEntry struct { func (fs FS) GatherARPEntries() ([]ARPEntry, error) { data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { - return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) + return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) } return parseARPEntries(data) @@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } else if width == expectedDataWidth { entry, err := parseARPEntry(columns) if err != nil { - return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) + return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) } entries = append(entries, entry) } else { - return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) + return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) } } diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index f5b7939b..4a173636 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -55,7 +55,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { parts := strings.Fields(line) if len(parts) < 4 { - return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) } node := strings.TrimRight(parts[1], ",") @@ -66,7 +66,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { bucketCount = arraySize } else { if bucketCount != arraySize { - return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) } } @@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { - return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) + return nil, fmt.Errorf("%s: Invalid value in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) } } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 06968ca2..f4f5501c 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -79,7 +79,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -192,9 +192,10 @@ func
parseCPUInfoARM(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) firstLine := firstNonEmptyLine(scanner) - match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) + match, err := regexp.MatchString("^[Pp]rocessor", firstLine) if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) + } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -258,7 +259,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -283,7 +284,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { if strings.HasPrefix(line, "processor") { match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) if len(match) < 2 { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } cpu := commonCPUInfo v, err := strconv.ParseUint(match[1], 0, 32) @@ -343,7 +344,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -421,7 +422,7 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -466,7 +467,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index 5048ad1f..9a73e263 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -55,12 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) { path := fs.proc.Path("crypto") b, err := util.ReadFileNoStat(path) if err != nil { - return nil, fmt.Errorf("error reading crypto %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, path, err) + } crypto, err := parseCrypto(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err) } return crypto, nil @@ -83,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) { kv := strings.Split(text, ":") if len(kv) != 2 { - return nil, fmt.Errorf("malformed crypto line: %q", text) +
return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text) } k := strings.TrimSpace(kv[0]) diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 0102ab0f..60c551e0 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -21,6 +21,7 @@ import ( // kernel data structures. type FS struct { proc fs.FS + real bool } // DefaultMountPoint is the common mount point of the proc filesystem. @@ -39,5 +40,11 @@ func NewFS(mountPoint string) (FS, error) { if err != nil { return FS{}, err } - return FS{fs}, nil + + real, err := isRealProc(mountPoint) + if err != nil { + return FS{}, err + } + + return FS{fs, real}, nil } diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go new file mode 100644 index 00000000..80057696 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -0,0 +1,23 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build netbsd || openbsd || solaris || windows +// +build netbsd openbsd solaris windows + +package procfs + +// isRealProc returns true on architectures that don't have a Type argument +// in their Statfs_t struct +func isRealProc(mountPoint string) (bool, error) { + return true, nil +} diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go new file mode 100644 index 00000000..6233217a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !netbsd && !openbsd && !solaris && !windows +// +build !netbsd,!openbsd,!solaris,!windows + +package procfs + +import ( + "syscall" +) + +// isRealProc determines whether supplied mountpoint is really a proc filesystem.
+func isRealProc(mountPoint string) (bool, error) { + stat := syscall.Statfs_t{} + err := syscall.Statfs(mountPoint, &stat) + if err != nil { + return false, err + } + + // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87 + return stat.Type == 0x9fa0, nil +} diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index f8070e6e..f560a8db 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { m, err := parseFscacheinfo(bytes.NewReader(b)) if err != nil { - return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) + return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err) } return *m, nil @@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { func setFSCacheFields(fields []string, setFields ...*uint64) error { var err error if len(fields) < len(setFields) { - return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) + return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) } for i := range setFields { @@ -263,7 +263,7 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { for s.Scan() { fields := strings.Fields(s.Text()) if len(fields) < 2 { - return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) + return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text()) } switch fields[0] { diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index b030951f..14272dc7 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -64,6 +64,21 @@ func ParsePInt64s(ss []string) ([]*int64, error) { return us, nil } +// Parses a uint64 from given hex in string. +func ParseHexUint64s(ss []string) ([]*uint64, error) { + us := make([]*uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return nil, err + } + + us = append(us, &u) + } + + return us, nil +} + // ReadUintFromFile reads a file and attempts to parse a uint64 from it. 
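`ParseHexUint64s` lives under `internal/`, so callers outside the procfs module cannot import it; this sketch restates the helper inline to show the contract: every field is base-16, and the result is a slice of pointers that callers index positionally. The input values are invented:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseHexUint64s mirrors procfs's new internal/util.ParseHexUint64s:
// each field is parsed as base-16 and returned as *uint64.
func parseHexUint64s(ss []string) ([]*uint64, error) {
	us := make([]*uint64, 0, len(ss))
	for _, s := range ss {
		u, err := strconv.ParseUint(s, 16, 64)
		if err != nil {
			return nil, err
		}
		us = append(us, &u)
	}
	return us, nil
}

func main() {
	// First three columns of a /proc/net/stat/nf_conntrack-style row (hex).
	fields := strings.Fields("000000c8 00000000 0000002a")
	vals, err := parseHexUint64s(fields)
	if err != nil {
		panic(err)
	}
	fmt.Println(*vals[0], *vals[1], *vals[2]) // 200 0 42
}
```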
func ReadUintFromFile(path string) (uint64, error) { data, err := os.ReadFile(path) diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 391c0795..5a145bbf 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -221,15 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) { case 46: ip = net.ParseIP(s[1:40]) if ip == nil { - return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) } default: - return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err) } portString := s[len(s)-4:] if len(portString) != 4 { - return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + return nil, 0, + fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err) } port, err := strconv.ParseUint(portString, 16, 16) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 0096cafb..59465c5b 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { loads := make([]float64, 3) parts := strings.Fields(string(loadavgBytes)) if len(parts) < 3 { - return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes)) } var err error for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { - return nil, fmt.Errorf("could not parse load %q: %w", load, err) + return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) } } return &LoadAvg{ diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index a95c889c..fdd4b954 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -70,7 +70,7 @@ func (fs FS) MDStat() ([]MDStat, error) { } mdstat, err := parseMDStat(data) if err != nil { - return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err) + return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) } return mdstat, nil } @@ -90,13 +90,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { deviceFields := strings.Fields(line) if len(deviceFields) < 3 { - return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx state := deviceFields[2] // active or inactive if len(lines) <= i+3 { - return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName) + return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName) } // Failed disks have the suffix (F) & Spare disks have the suffix (S). 
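These procfs hunks rewrap messages around sentinel errors without changing the exported API. Where a sentinel is wrapped with %w, as in several mdstat messages below, callers can branch on it with errors.Is. A usage sketch, assuming the exported procfs.ErrFileParse sentinel this diff references throughout:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	stats, err := fs.MDStat()
	if err != nil {
		// Messages that wrap the sentinel with %w can be matched directly.
		if errors.Is(err, procfs.ErrFileParse) {
			fmt.Println("mdstat present but malformed:", err)
			return
		}
		panic(err)
	}
	for _, md := range stats {
		fmt.Printf("%s: %s, %d/%d disks active\n", md.Name, md.ActivityState, md.DisksActive, md.DisksTotal)
	}
}
```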
@@ -105,7 +105,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) if err != nil { - return nil, fmt.Errorf("error parsing md device lines: %w", err) + return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) } syncLineIdx := i + 2 @@ -140,7 +140,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { } else { syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) + return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) } } } @@ -168,13 +168,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { statusFields := strings.Fields(statusLine) if len(statusFields) < 1 { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { @@ -189,17 +189,17 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in matches := statusLineRE.FindStringSubmatch(statusLine) if len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Could not find all substring matches %s: %w", ErrFileParse, statusLine, err) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err) } down = int64(strings.Count(matches[4], "_")) @@ -209,42 +209,42 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err) } syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err) } // Get percentage complete matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine) + return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) } pct, err =
strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) if err != nil { - return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) } // Get time expected left to complete matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine) + return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) } finish, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) } // Get recovery speed matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine) + return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) } speed, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) } return syncedBlocks, pct, finish, speed, nil diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index f65e174e..eaf00e22 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -152,7 +152,7 @@ func (fs FS) Meminfo() (Meminfo, error) { m, err := parseMemInfo(bytes.NewReader(b)) if err != nil { - return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) + return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err) } return *m, nil @@ -165,7 +165,7 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { // Each line has at least a name and value; we ignore the unit. 
fields := strings.Fields(s.Text()) if len(fields) < 2 { - return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) } v, err := strconv.ParseUint(fields[1], 0, 64) diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index 59f4d505..388ebf39 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -78,11 +78,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { mountInfo := strings.Split(mountString, " ") mountInfoLength := len(mountInfo) if mountInfoLength < 10 { - return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) + return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString) } if mountInfo[mountInfoLength-4] != "-" { - return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) + return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4]) } mount := &MountInfo{ @@ -98,18 +98,18 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { mount.MountID, err = strconv.Atoi(mountInfo[0]) if err != nil { - return nil, fmt.Errorf("failed to parse mount ID") + return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID) } mount.ParentID, err = strconv.Atoi(mountInfo[1]) if err != nil { - return nil, fmt.Errorf("failed to parse parent ID") + return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID) } // Has optional fields, which is a space separated list of values. // Example: shared:2 master:7 if mountInfo[6] != "" { mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) if err != nil { - return nil, err + return nil, fmt.Errorf("%s: %w", ErrFileParse, err) } } return mount, nil diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 0c482c18..852c8c4a 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -186,6 +186,8 @@ type NFSOperationStats struct { CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. CumulativeTotalRequestMilliseconds uint64 + // The average time from the point the client sends RPC requests until it receives the response. + AverageRTTMilliseconds float64 // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. 
Errors uint64 } @@ -264,7 +266,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { if len(ss) > deviceEntryLen { // Only NFSv3 and v4 are supported for parsing statistics if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type) } statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) @@ -288,7 +290,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { // device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("invalid device entry: %v", ss) + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) } // Check for specific words appearing at specific indices to ensure @@ -306,7 +308,7 @@ func parseMount(ss []string) (*Mount, error) { for _, f := range format { if ss[f.i] != f.s { - return nil, fmt.Errorf("invalid device entry: %v", ss) + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) } } @@ -343,7 +345,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e switch ss[0] { case fieldOpts: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } if stats.Opts == nil { stats.Opts = map[string]string{} @@ -358,7 +360,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e } case fieldAge: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") @@ -369,7 +371,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Age = d case fieldBytes: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } bstats, err := parseNFSBytesStats(ss[1:]) if err != nil { @@ -379,7 +381,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Bytes = *bstats case fieldEvents: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss) } estats, err := parseNFSEventsStats(ss[1:]) if err != nil { @@ -389,7 +391,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Events = *estats case fieldTransport: if len(ss) < 3 { - return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss) } tstats, err := parseNFSTransportStats(ss[1:], statVersion) @@ -428,7 +430,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e // integer fields. func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss) } ns := make([]uint64, 0, fieldBytesLen) @@ -457,7 +459,7 @@ func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { // integer fields. 
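The `AverageRTTMilliseconds` field added above is derived rather than read: `parseNFSOperationStats` (further below) divides cumulative total response time by the request count, guarding against zero requests. The same arithmetic in isolation, with invented values:

```go
package main

import "fmt"

func main() {
	// Per-operation columns from /proc/self/mountstats (values invented):
	requests := uint64(400)                  // ns[0]
	cumulativeTotalResponseMs := uint64(900) // ns[6]

	var avgRTT float64
	if requests != 0 { // mirrors the guard added in parseNFSOperationStats
		avgRTT = float64(cumulativeTotalResponseMs) / float64(requests)
	}
	fmt.Printf("average RTT: %.2f ms\n", avgRTT) // 2.25 ms
}
```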
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss) } ns := make([]uint64, 0, fieldEventsLen) @@ -521,7 +523,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { } if len(ss) < minFields { - return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss) } // Skip string operation name for integers @@ -534,7 +536,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { ns = append(ns, n) } - opStats := NFSOperationStats{ Operation: strings.TrimSuffix(ss[0], ":"), Requests: ns[0], @@ -546,6 +547,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { CumulativeTotalResponseMilliseconds: ns[6], CumulativeTotalRequestMilliseconds: ns[7], } + if ns[0] != 0 { + opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0]) + } if len(ns) > 8 { opStats.Errors = ns[8] @@ -572,10 +576,10 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } else if protocol == "udp" { expectedLength = fieldTransport10UDPLen } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss) } case statVersion11: var expectedLength int @@ -584,13 +588,13 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } else if protocol == "udp" { expectedLength = fieldTransport11UDPLen } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v", ErrFileParse, ss) } default: - return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q", ErrFileParse, statVersion) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index 8300daca..fdfa4561 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -18,7 +18,6 @@ import ( "bytes" "fmt" "io" - "strconv" "strings" "github.com/prometheus/procfs/internal/util" @@ -28,9 +27,13 @@ import ( // and contains netfilter conntrack statistics at one CPU core. 
type ConntrackStatEntry struct { Entries uint64 + Searched uint64 Found uint64 + New uint64 Invalid uint64 Ignore uint64 + Delete uint64 + DeleteList uint64 Insert uint64 InsertFailed uint64 Drop uint64 @@ -55,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) { stat, err := parseConntrackStat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err) } return stat, nil @@ -81,73 +84,35 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { // Parses a ConntrackStatEntry from given array of fields. func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { - if len(fields) != 17 { - return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") - } - entry := &ConntrackStatEntry{} - - entries, err := parseConntrackStatField(fields[0]) - if err != nil { - return nil, err - } - entry.Entries = entries - - found, err := parseConntrackStatField(fields[2]) - if err != nil { - return nil, err - } - entry.Found = found - - invalid, err := parseConntrackStatField(fields[4]) - if err != nil { - return nil, err - } - entry.Invalid = invalid - - ignore, err := parseConntrackStatField(fields[5]) - if err != nil { - return nil, err - } - entry.Ignore = ignore - - insert, err := parseConntrackStatField(fields[8]) + entries, err := util.ParseHexUint64s(fields) if err != nil { - return nil, err + return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err) } - entry.Insert = insert - - insertFailed, err := parseConntrackStatField(fields[9]) - if err != nil { - return nil, err + numEntries := len(entries) + if numEntries < 16 || numEntries > 17 { + return nil, + fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries) } - entry.InsertFailed = insertFailed - drop, err := parseConntrackStatField(fields[10]) - if err != nil { - return nil, err + stats := &ConntrackStatEntry{ + Entries: *entries[0], + Searched: *entries[1], + Found: *entries[2], + New: *entries[3], + Invalid: *entries[4], + Ignore: *entries[5], + Delete: *entries[6], + DeleteList: *entries[7], + Insert: *entries[8], + InsertFailed: *entries[9], + Drop: *entries[10], + EarlyDrop: *entries[11], } - entry.Drop = drop - earlyDrop, err := parseConntrackStatField(fields[11]) - if err != nil { - return nil, err + // Ignore missing search_restart on Linux < 2.6.35. + if numEntries == 17 { + stats.SearchRestart = *entries[16] } - entry.EarlyDrop = earlyDrop - searchRestart, err := parseConntrackStatField(fields[16]) - if err != nil { - return nil, err - } - entry.SearchRestart = searchRestart - - return entry, nil -} - -// Parses a uint64 from given hex in string. 
-func parseConntrackStatField(field string) (uint64, error) { - val, err := strconv.ParseUint(field, 16, 64) - if err != nil { - return 0, fmt.Errorf("couldn't parse %q field: %w", field, err) - } - return val, err + return stats, nil } diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index 7fd57d7f..4da81ea5 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -130,7 +130,7 @@ func parseIP(hexIP string) (net.IP, error) { var byteIP []byte byteIP, err := hex.DecodeString(hexIP) if err != nil { - return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) + return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) } switch len(byteIP) { case 4: @@ -144,7 +144,7 @@ func parseIP(hexIP string) (net.IP, error) { } return i, nil default: - return nil, fmt.Errorf("Unable to parse IP %s", hexIP) + return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil) } } @@ -153,7 +153,8 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { line := &netIPSocketLine{} if len(fields) < 10 { return nil, fmt.Errorf( - "cannot parse net socket line as it has less then 10 columns %q", + "%w: Less than 10 columns found %q", + ErrFileParse, strings.Join(fields, " "), ) } @@ -162,64 +163,65 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { // sl s := strings.Split(fields[0], ":") if len(s) != 2 { - return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) + return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0]) } if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) + return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) } // local_address l := strings.Split(fields[1], ":") if len(l) != 2 { - return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) + return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1]) } if line.LocalAddr, err = parseIP(l[0]); err != nil { return nil, err } if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) + return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) } // remote_address r := strings.Split(fields[2], ":") if len(r) != 2 { - return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) + return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1]) } if line.RemAddr, err = parseIP(r[0]); err != nil { return nil, err } if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) } // st if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) } // tx_queue and rx_queue q := strings.Split(fields[4], ":") if len(q) != 2 { return nil, fmt.Errorf( - "cannot parse tx/rx queues in 
socket line as it has a missing colon %q", + "%w: Missing colon for tx/rx queues in socket line %q", + ErrFileParse, fields[4], ) } if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) } if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse rx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) } // uid if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) } // inode if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) } return line, nil diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index 374b6f73..b6c77b70 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -131,7 +131,7 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro } else if fields[6] == disabled { line.Slab = false } else { - return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) + return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -173,7 +173,7 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro } else if capabilities[i] == "n" { *capabilityFields[i] = false } else { - return fmt.Errorf("unable to parse capability block for protocol: position %d", i) + return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } return nil } diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go new file mode 100644 index 00000000..deb7029f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_route.go @@ -0,0 +1,143 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const ( + blackholeRepresentation string = "*" + blackholeIfaceName string = "blackhole" + routeLineColumns int = 11 +) + +// A NetRouteLine represents one line from net/route.
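A usage sketch for the `NetRoute` reader defined just below; note that `/proc/net/route` stores addresses as little-endian hex words, so `Destination`, `Gateway`, and `Mask` still need byte-order handling before display:

```go
package main

import (
	"fmt"
	"net"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	routes, err := fs.NetRoute()
	if err != nil {
		panic(err)
	}
	for _, r := range routes {
		// The kernel writes addresses as little-endian hex words, so the
		// least significant byte is the first octet.
		dst := net.IPv4(byte(r.Destination), byte(r.Destination>>8), byte(r.Destination>>16), byte(r.Destination>>24))
		fmt.Printf("%-8s dst=%s metric=%d\n", r.Iface, dst, r.Metric)
	}
}
```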
+type NetRouteLine struct { + Iface string + Destination uint32 + Gateway uint32 + Flags uint32 + RefCnt uint32 + Use uint32 + Metric uint32 + Mask uint32 + MTU uint32 + Window uint32 + IRTT uint32 +} + +func (fs FS) NetRoute() ([]NetRouteLine, error) { + return readNetRoute(fs.proc.Path("net", "route")) +} + +func readNetRoute(path string) ([]NetRouteLine, error) { + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + + routelines, err := parseNetRoute(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read net route from %s: %w", path, err) + } + return routelines, nil +} + +func parseNetRoute(r io.Reader) ([]NetRouteLine, error) { + var routelines []NetRouteLine + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + routeline, err := parseNetRouteLine(fields) + if err != nil { + return nil, err + } + routelines = append(routelines, *routeline) + } + return routelines, nil +} + +func parseNetRouteLine(fields []string) (*NetRouteLine, error) { + if len(fields) != routeLineColumns { + return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields)) + } + iface := fields[0] + if iface == blackholeRepresentation { + iface = blackholeIfaceName + } + destination, err := strconv.ParseUint(fields[1], 16, 32) + if err != nil { + return nil, err + } + gateway, err := strconv.ParseUint(fields[2], 16, 32) + if err != nil { + return nil, err + } + flags, err := strconv.ParseUint(fields[3], 10, 32) + if err != nil { + return nil, err + } + refcnt, err := strconv.ParseUint(fields[4], 10, 32) + if err != nil { + return nil, err + } + use, err := strconv.ParseUint(fields[5], 10, 32) + if err != nil { + return nil, err + } + metric, err := strconv.ParseUint(fields[6], 10, 32) + if err != nil { + return nil, err + } + mask, err := strconv.ParseUint(fields[7], 16, 32) + if err != nil { + return nil, err + } + mtu, err := strconv.ParseUint(fields[8], 10, 32) + if err != nil { + return nil, err + } + window, err := strconv.ParseUint(fields[9], 10, 32) + if err != nil { + return nil, err + } + irtt, err := strconv.ParseUint(fields[10], 10, 32) + if err != nil { + return nil, err + } + routeline := &NetRouteLine{ + Iface: iface, + Destination: uint32(destination), + Gateway: uint32(gateway), + Flags: uint32(flags), + RefCnt: uint32(refcnt), + Use: uint32(use), + Metric: uint32(metric), + Mask: uint32(mask), + MTU: uint32(mtu), + Window: uint32(window), + IRTT: uint32(irtt), + } + return routeline, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go index e36f4872..360e36af 100644 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -16,7 +16,6 @@ package procfs import ( "bufio" "bytes" - "errors" "fmt" "io" "strings" @@ -70,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) { stat, err := parseSockstat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) + return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err) } return stat, nil @@ -84,13 +83,13 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // Expect a minimum of a protocol and one key/value pair. 
fields := strings.Split(s.Text(), " ") if len(fields) < 3 { - return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) + return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text()) } // The remaining fields are key/value pairs. kvs, err := parseSockstatKVs(fields[1:]) if err != nil { - return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) + return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) } // The first field is the protocol. We must trim its colon suffix. @@ -119,7 +118,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // parseSockstatKVs parses a string slice into a map of key/value pairs. func parseSockstatKVs(kvs []string) (map[string]int, error) { if len(kvs)%2 != 0 { - return nil, errors.New("odd number of fields in key/value pairs") + return nil, fmt.Errorf("%w: Odd number of fields in key/value pairs %q", ErrFileParse, kvs) } // Iterate two values at a time to gather key/value pairs. diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index 06b7b8f2..c7708529 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { entries, err := parseSoftnet(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) + return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err) } return entries, nil @@ -76,13 +76,14 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { s := bufio.NewScanner(r) var stats []SoftnetStat + cpuIndex := 0 for s.Scan() { columns := strings.Fields(s.Text()) width := len(columns) softnetStat := SoftnetStat{} if width < minColumns { - return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) + return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns) } // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 @@ -127,9 +128,13 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { softnetStat.SoftnetBacklogLen = us[0] softnetStat.Index = us[1] + } else { + // For older kernels, create the Index based on the scan line number.
+ softnetStat.Index = uint32(cpuIndex) } softnetStat.Width = width stats = append(stats, softnetStat) + cpuIndex++ } return stats, nil diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index 98aa8e1c..acbbc57e 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { line := s.Text() item, err := nu.parseLine(line, hasInode, minFields) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) + return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) } nu.Rows = append(nu.Rows, item) } if err := s.Err(); err != nil { - return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) + return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err) } return &nu, nil @@ -126,7 +126,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, l := len(fields) if l < min { - return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) } // Field offsets are as follows: @@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, users, err := u.parseUsers(fields[1]) if err != nil { - return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) + return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err) } flags, err := u.parseFlags(fields[3]) if err != nil { - return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) + return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) } typ, err := u.parseType(fields[4]) if err != nil { - return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) + return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err) } state, err := u.parseState(fields[5]) if err != nil { - return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) + return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err) } var inode uint64 if hasInode { inode, err = u.parseInode(fields[6]) if err != nil { - return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) + return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err) } } diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go new file mode 100644 index 00000000..7443edca --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -0,0 +1,182 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
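The new wireless parser continues below; a minimal sketch of the resulting API, assuming a real mounted /proc with at least one wireless interface:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	ifaces, err := fs.Wireless()
	if err != nil {
		panic(err) // malformed lines surface as ErrFileParse-wrapped errors
	}
	for _, w := range ifaces {
		fmt.Printf("%s: link=%d level=%ddBm noise=%ddBm missed_beacons=%d\n",
			w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise, w.MissedBeacon)
	}
}
```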
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Wireless models the content of /proc/net/wireless. +type Wireless struct { + Name string + + // Status is the current 4-digit hex value status of the interface. + Status uint64 + + // QualityLink is the link quality. + QualityLink int + + // QualityLevel is the signal gain (dBm). + QualityLevel int + + // QualityNoise is the signal noise baseline (dBm). + QualityNoise int + + // DiscardedNwid is the number of discarded packets with wrong nwid/essid. + DiscardedNwid int + + // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP). + DiscardedCrypt int + + // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly. + DiscardedFrag int + + // DiscardedRetry is the number of discarded packets that reached max MAC retries. + DiscardedRetry int + + // DiscardedMisc is the number of discarded packets for other reasons. + DiscardedMisc int + + // MissedBeacon is the number of missed beacons/superframe. + MissedBeacon int +} + +// Wireless returns kernel wireless statistics. +func (fs FS) Wireless() ([]*Wireless, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless")) + if err != nil { + return nil, err + } + + m, err := parseWireless(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err) + } + + return m, nil +} + +// parseWireless parses the contents of /proc/net/wireless. +/* +Inter-| sta-| Quality | Discarded packets | Missed | WE +face | tus | link level noise | nwid crypt frag retry misc | beacon | 22 + eth1: 0000 5. -256. -10. 0 1 0 3 0 0 + eth2: 0000 5. -256. -20. 0 2 0 4 0 0 +*/ +func parseWireless(r io.Reader) ([]*Wireless, error) { + var ( + interfaces []*Wireless + scanner = bufio.NewScanner(r) + ) + + for n := 0; scanner.Scan(); n++ { + // Skip the 2 header lines. 
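Before the parser body continues below, here is a minimal consumer sketch for the new Wireless accessor, assuming a kernel that exposes /proc/net/wireless; the struct fields map one-to-one onto the columns in the sample header above:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	ifaces, err := fs.Wireless()
	if err != nil {
		panic(err) // wraps ErrFileParse on malformed content
	}
	for _, w := range ifaces {
		fmt.Printf("%s: link=%d level=%ddBm noise=%ddBm missed=%d\n",
			w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise, w.MissedBeacon)
	}
}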
+ if n < 2 { + continue + } + + line := scanner.Text() + + parts := strings.Split(line, ":") + if len(parts) != 2 { + return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line) + } + + name := strings.TrimSpace(parts[0]) + stats := strings.Fields(parts[1]) + + if len(stats) < 10 { + return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line) + } + + status, err := strconv.ParseUint(stats[0], 16, 16) + if err != nil { + return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line) + } + + qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) + if err != nil { + return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, stats[1], err) + } + + qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) + if err != nil { + return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, stats[2], err) + } + + qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) + if err != nil { + return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, stats[3], err) + } + + dnwid, err := strconv.Atoi(stats[4]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, stats[4], err) + } + + dcrypt, err := strconv.Atoi(stats[5]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, stats[5], err) + } + + dfrag, err := strconv.Atoi(stats[6]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, stats[6], err) + } + + dretry, err := strconv.Atoi(stats[7]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, stats[7], err) + } + + dmisc, err := strconv.Atoi(stats[8]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, stats[8], err) + } + + mbeacon, err := strconv.Atoi(stats[9]) + if err != nil { + return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, stats[9], err) + } + + w := &Wireless{ + Name: name, + Status: status, + QualityLink: qlink, + QualityLevel: qlevel, + QualityNoise: qnoise, + DiscardedNwid: dnwid, + DiscardedCrypt: dcrypt, + DiscardedFrag: dfrag, + DiscardedRetry: dretry, + DiscardedMisc: dmisc, + MissedBeacon: mbeacon, + } + + interfaces = append(interfaces, w) + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) + } + + return interfaces, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go index f9d9d243..932ef204 100644 --- a/vendor/github.com/prometheus/procfs/net_xfrm.go +++ b/vendor/github.com/prometheus/procfs/net_xfrm.go @@ -115,7 +115,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) { fields := strings.Fields(s.Text()) if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) + return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) } name := fields[0] diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index 5cc40aef..742dff45 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -15,7 +15,6 @@ package procfs import ( "bufio" - "io" "os" "path/filepath" "strconv" @@ -38,12 +37,7 @@ func (fs FS) NetStat()
([]NetStat, error) { var netStatsTotal []NetStat for _, filePath := range statFiles { - file, err := os.Open(filePath) - if err != nil { - return nil, err - } - - procNetstat, err := parseNetstat(file) + procNetstat, err := parseNetstat(filePath) if err != nil { return nil, err } @@ -56,14 +50,17 @@ func (fs FS) NetStat() ([]NetStat, error) { // parseNetstat parses the metrics from `/proc/net/stat/` file // and returns a NetStat structure. -func parseNetstat(r io.Reader) (NetStat, error) { - var ( - scanner = bufio.NewScanner(r) - netStat = NetStat{ - Stats: make(map[string][]uint64), - } - ) +func parseNetstat(filePath string) (NetStat, error) { + netStat := NetStat{ + Stats: make(map[string][]uint64), + } + file, err := os.Open(filePath) + if err != nil { + return netStat, err + } + defer file.Close() + scanner := bufio.NewScanner(file) scanner.Scan() // First string is always a header for stats diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index c30223af..e1599961 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -15,13 +15,13 @@ package procfs import ( "bytes" + "errors" "fmt" "io" "os" "strconv" "strings" - "github.com/prometheus/procfs/internal/fs" "github.com/prometheus/procfs/internal/util" ) @@ -30,12 +30,18 @@ type Proc struct { // The process ID. PID int - fs fs.FS + fs FS } // Procs represents a list of Proc structs. type Procs []Proc +var ( + ErrFileParse = errors.New("Error Parsing File") + ErrFileRead = errors.New("Error Reading File") + ErrMountPoint = errors.New("Error Accessing Mount point") +) + func (p Procs) Len() int { return len(p) } func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } @@ -43,7 +49,7 @@ func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } // Self returns a process for the current process read via /proc/self. func Self() (Proc, error) { fs, err := NewFS(DefaultMountPoint) - if err != nil { + if err != nil || errors.Unwrap(err) == ErrMountPoint { return Proc{}, err } return fs.Self() @@ -92,7 +98,7 @@ func (fs FS) Proc(pid int) (Proc, error) { if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { return Proc{}, err } - return Proc{PID: pid, fs: fs.proc}, nil + return Proc{PID: pid, fs: fs}, nil } // AllProcs returns a list of all currently available processes. @@ -105,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) } p := Procs{} @@ -114,7 +120,7 @@ func (fs FS) AllProcs() (Procs, error) { if err != nil { continue } - p = append(p, Proc{PID: int(pid), fs: fs.proc}) + p = append(p, Proc{PID: int(pid), fs: fs}) } return p, nil @@ -206,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) { for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("could not parse fd %q: %w", n, err) + return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err) } fds[i] = uintptr(fd) } @@ -237,6 +243,19 @@ func (p Proc) FileDescriptorTargets() ([]string, error) { // FileDescriptorsLen returns the number of currently open file descriptors of // a process. 
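The NetStat refactor above moves the os.Open into parseNetstat so that a single defer file.Close() covers every return path instead of the caller juggling the handle. Call sites are unchanged; a short sketch, with field names per the NetStat struct in this file:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	stats, err := fs.NetStat() // one NetStat per file under /proc/net/stat
	if err != nil {
		panic(err)
	}
	for _, s := range stats {
		fmt.Printf("%s: %d counter columns\n", s.Filename, len(s.Stats))
	}
}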
func (p Proc) FileDescriptorsLen() (int, error) { + // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901 + if p.fs.real { + stat, err := os.Stat(p.path("fd")) + if err != nil { + return 0, err + } + + size := stat.Size() + if size > 0 { + return int(size), nil + } + } + fds, err := p.fileDescriptors() if err != nil { return 0, err @@ -278,14 +297,14 @@ func (p Proc) fileDescriptors() ([]string, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) + return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) } return names, nil } func (p Proc) path(pa ...string) string { - return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) + return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) } // FileDescriptorsInfo retrieves information about all file descriptors of diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index ea83a75f..daeed7f5 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -51,7 +51,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) { fields := strings.SplitN(cgroupStr, ":", 3) if len(fields) < 3 { - return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) + return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr) } cgroup := &Cgroup{ @@ -60,7 +60,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) { } cgroup.HierarchyID, err = strconv.Atoi(fields[0]) if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") + return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID) } if fields[1] != "" { ssNames := strings.Split(fields[1], ",") diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go index 24d4dce9..5dd49389 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroups.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go @@ -46,7 +46,7 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { fields := strings.Fields(CgroupSummaryStr) // require at least 4 fields if len(fields) < 4 { - return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr) + return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr) } CgroupSummary := &CgroupSummary{ @@ -54,15 +54,15 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { } CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") + return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1]) } CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) if err != nil { - return nil, fmt.Errorf("failed to parse Cgroup Num") + return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2]) } CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) if err != nil { - return nil, fmt.Errorf("failed to parse Enabled") + return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3]) } return CgroupSummary, nil } diff --git 
a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index 1bbdd4a8..4b7933e4 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -111,7 +111,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) { } return i, nil } - return nil, fmt.Errorf("invalid inode entry: %q", line) + return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line) } // ProcFDInfos represents a list of ProcFDInfo structs. diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go index 9df79c23..86b4b452 100644 --- a/vendor/github.com/prometheus/procfs/proc_interrupts.go +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -66,7 +66,7 @@ func parseInterrupts(r io.Reader) (Interrupts, error) { continue } if len(parts) < 2 { - return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) + return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts) } intName := parts[0][:len(parts[0])-1] // remove trailing : diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index 7a138818..c86d815d 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -103,7 +103,7 @@ func (p Proc) Limits() (ProcLimits, error) { //fields := limitsMatch.Split(s.Text(), limitsFields) fields := limitsMatch.FindStringSubmatch(s.Text()) if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text()) + return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text()) } switch fields[1] { @@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) { } i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("couldn't parse value %q: %w", s, err) + return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err) } return i, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index f1bcbf32..727549a1 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -65,7 +65,7 @@ type ProcMap struct { func parseDevice(s string) (uint64, error) { toks := strings.Split(s, ":") if len(toks) < 2 { - return 0, fmt.Errorf("unexpected number of fields") + return 0, fmt.Errorf("%w: unexpected number of fields, expected: 2, got: %q", ErrFileParse, len(toks)) } major, err := strconv.ParseUint(toks[0], 16, 0) @@ -95,7 +95,7 @@ func parseAddress(s string) (uintptr, error) { func parseAddresses(s string) (uintptr, uintptr, error) { toks := strings.Split(s, "-") if len(toks) < 2 { - return 0, 0, fmt.Errorf("invalid address") + return 0, 0, fmt.Errorf("%w: invalid address", ErrFileParse) } saddr, err := parseAddress(toks[0]) @@ -114,7 +114,7 @@ func parseAddresses(s string) (uintptr, uintptr, error) { // parsePermissions parses a token and returns any that are set. 
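parseDevice, parseAddresses, and parsePermissions (continued below) back the exported ProcMaps accessor. A hedged usage sketch for the current process:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	maps, err := p.ProcMaps()
	if err != nil {
		panic(err) // parse failures now match errors.Is(err, procfs.ErrFileParse)
	}
	for _, m := range maps {
		fmt.Printf("%x-%x %s\n", m.StartAddr, m.EndAddr, m.Pathname)
	}
}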
func parsePermissions(s string) (*ProcMapPermissions, error) { if len(s) < 4 { - return nil, fmt.Errorf("invalid permissions token") + return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse) } perms := ProcMapPermissions{} @@ -141,7 +141,7 @@ func parsePermissions(s string) (*ProcMapPermissions, error) { func parseProcMap(text string) (*ProcMap, error) { fields := strings.Fields(text) if len(fields) < 5 { - return nil, fmt.Errorf("truncated procmap entry") + return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse) } saddr, eaddr, err := parseAddresses(fields[0]) diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 6a43bb24..8e3ff4d7 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -195,8 +195,8 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { // Remove trailing :. protocol := strings.TrimSuffix(nameParts[0], ":") if len(nameParts) != len(valueParts) { - return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s", - fileName, protocol) + return procNetstat, fmt.Errorf("%w: field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) } for i := 1; i < len(nameParts); i++ { value, err := strconv.ParseFloat(valueParts[i], 64) diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index 391b4cbd..c2266675 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %w", err) + return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err) } ns := make(Namespaces, len(names)) @@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) { fields := strings.SplitN(target, ":", 2) if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target) + return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target) } typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { - return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err) + return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index a68fe152..152539d3 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -61,7 +61,7 @@ type PSIStats struct { func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) if err != nil { - return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err) + return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) } return parsePSIStats(resource, bytes.NewReader(data)) diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index 6c46b718..b9d2cf64 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -159,8 +159,8 @@ func parseSnmp(r
io.Reader, fileName string) (ProcSnmp, error) { // Remove trailing :. protocol := strings.TrimSuffix(nameParts[0], ":") if len(nameParts) != len(valueParts) { - return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s", - fileName, protocol) + return procSnmp, fmt.Errorf("%w: field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) } for i := 1; i < len(nameParts); i++ { value, err := strconv.ParseFloat(valueParts[i], 64) diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index b278eb2c..923e5500 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -18,7 +18,6 @@ import ( "fmt" "os" - "github.com/prometheus/procfs/internal/fs" "github.com/prometheus/procfs/internal/util" ) @@ -112,7 +111,7 @@ type ProcStat struct { // Aggregated block I/O delays, measured in clock ticks (centiseconds). DelayAcctBlkIOTicks uint64 - proc fs.FS + proc FS } // NewStat returns the current status information of the process. @@ -139,7 +138,7 @@ func (p Proc) Stat() (ProcStat, error) { ) if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data) + return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data) } s.Comm = string(data[l+1 : r]) @@ -210,8 +209,7 @@ func (s ProcStat) ResidentMemory() int { // StartTime returns the unix timestamp of the process in seconds. func (s ProcStat) StartTime() (float64, error) { - fs := FS{proc: s.proc} - stat, err := fs.Stat() + stat, err := s.proc.Stat() if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 3d8c0643..c055d075 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -15,6 +15,7 @@ package procfs import ( "bytes" + "sort" "strconv" "strings" @@ -76,6 +77,9 @@ type ProcStatus struct { UIDs [4]string // GIDs of the process (Real, effective, saved set, and filesystem GIDs) GIDs [4]string + + // CpusAllowedList: List of cpu cores processes are allowed to run on. + CpusAllowedList []uint64 } // NewStatus returns the current status information of the process. @@ -161,10 +165,38 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.VoluntaryCtxtSwitches = vUint case "nonvoluntary_ctxt_switches": s.NonVoluntaryCtxtSwitches = vUint + case "Cpus_allowed_list": + s.CpusAllowedList = calcCpusAllowedList(vString) } + } // TotalCtxtSwitches returns the total context switch.
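The new CpusAllowedList field expands the kernel's compact CPU-list syntax, so a status line such as Cpus_allowed_list: 0-2,7 surfaces as [0 1 2 7]; calcCpusAllowedList just below does the range expansion and sorting. A reader sketch, assuming procfs.Self() succeeds:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	status, err := p.NewStatus()
	if err != nil {
		panic(err)
	}
	// For "Cpus_allowed_list: 0-2,7" this prints [0 1 2 7].
	fmt.Println(status.CpusAllowedList)
}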
func (s ProcStatus) TotalCtxtSwitches() uint64 { return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches } + +func calcCpusAllowedList(cpuString string) []uint64 { + s := strings.Split(cpuString, ",") + + var g []uint64 + + for _, cpu := range s { + // parse cpu ranges, example: 1-3=[1,2,3] + if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 { + startCPU, _ := strconv.ParseUint(l[0], 10, 64) + endCPU, _ := strconv.ParseUint(l[1], 10, 64) + + for i := startCPU; i <= endCPU; i++ { + g = append(g, i) + } + } else if len(l) == 1 { + cpu, _ := strconv.ParseUint(l[0], 10, 64) + g = append(g, cpu) + } + + } + + sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) + return g +} diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index d46533eb..12c5bf05 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) { vp := util.NewValueParser(f) values[i] = vp.Int() if err := vp.Err(); err != nil { - return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err) + return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) } } return values, nil diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go index bc9aaf5c..8611c901 100644 --- a/vendor/github.com/prometheus/procfs/slab.go +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -68,7 +68,7 @@ func parseV21SlabEntry(line string) (*Slab, error) { l := slabSpace.ReplaceAllString(line, " ") s := strings.Split(l, " ") if len(s) != 16 { - return nil, fmt.Errorf("unable to parse: %q", line) + return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line) } var err error i := &Slab{Name: s[0]} diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index 559129cb..b8fad677 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -57,7 +57,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { ) if !scanner.Scan() { - return Softirqs{}, fmt.Errorf("softirqs empty") + return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead) } for scanner.Scan() { @@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TIMER:": @@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_TX:": @@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err) + return Softirqs{}, 
fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_RX:": @@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "BLOCK:": @@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "IRQ_POLL:": @@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TASKLET:": @@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "SCHED:": @@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "HRTIMER:": @@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "RCU:": @@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) } } } } if err := scanner.Err(); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err) } return softirqs, scanner.Err() diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 586af48a..05269c89 100644 --- 
a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err) + return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) } cpuStat.User /= userHZ @@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err) + return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) } return cpuStat, cpuID, nil @@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err) + return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err) } return softIRQStat, total, nil @@ -197,34 +197,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) } case parts[0] == "softirq": 
softIRQStats, total, err := parseSoftIRQStat(line) @@ -247,7 +247,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err) } return stat, nil diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go index 15edc221..fa00f555 100644 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -64,7 +64,7 @@ func parseSwapString(swapString string) (*Swap, error) { swapFields := strings.Fields(swapString) swapLength := len(swapFields) if swapLength < 5 { - return nil, fmt.Errorf("too few fields in swap string: %s", swapString) + return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString) } swap := &Swap{ @@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) { swap.Size, err = strconv.Atoi(swapFields[2]) if err != nil { - return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) + return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) } swap.Used, err = strconv.Atoi(swapFields[3]) if err != nil { - return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) + return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) } swap.Priority, err = strconv.Atoi(swapFields[4]) if err != nil { - return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) + return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) } return swap, nil diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index f08bfc76..394762c7 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err) } t := Procs{} @@ -54,7 +54,8 @@ func (fs FS) AllThreads(pid int) (Procs, error) { if err != nil { continue } - t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)}) + + t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.real}}) } return t, nil @@ -66,13 +67,13 @@ func (fs FS) Thread(pid, tid int) (Proc, error) { if _, err := os.Stat(taskPath); err != nil { return Proc{}, err } - return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil + return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.real}}, nil } // Thread returns a process for a given TID of Proc. 
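The FS{fsi.FS(taskPath), fs.real} plumbing here exists so threads keep the real flag that gates the FileDescriptorsLen fast path added earlier in this diff: on kernels with the referenced v6.2 commit, a stat of /proc/<pid>/fd reports the descriptor count directly and the directory never has to be read. Callers see no API change; a sketch:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	// stat()-based fast path on Linux v6.2+, directory listing otherwise.
	n, err := p.FileDescriptorsLen()
	if err != nil {
		panic(err)
	}
	fmt.Println("open fds:", n)
}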
func (proc Proc) Thread(tid int) (Proc, error) { - tfs := fsi.FS(proc.path("task")) - if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { + tfs := FS{fsi.FS(proc.path("task")), proc.fs.real} + if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil { return Proc{}, err } return Proc{PID: tid, fs: tfs}, nil diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index cdedcae9..51c49d89 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -86,7 +86,7 @@ func (fs FS) VM() (*VM, error) { return nil, err } if !file.Mode().IsDir() { - return nil, fmt.Errorf("%s is not a directory", path) + return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path) } files, err := os.ReadDir(path) diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index c745a4c0..ce5fefa5 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) func (fs FS) Zoneinfo() ([]Zoneinfo, error) { data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { - return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) } zoneinfo, err := parseZoneinfo(data) if err != nil { - return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) } return zoneinfo, nil } diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index a29c98ee..7824780c 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -34,6 +34,7 @@ import ( "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/storage/remote/azuread" ) var ( @@ -146,13 +147,14 @@ var ( // DefaultScrapeConfig is the default scrape configuration. DefaultScrapeConfig = ScrapeConfig{ - // ScrapeTimeout and ScrapeInterval default to the - // configured globals. - MetricsPath: "/metrics", - Scheme: "http", - HonorLabels: false, - HonorTimestamps: true, - HTTPClientConfig: config.DefaultHTTPClientConfig, + // ScrapeTimeout and ScrapeInterval default to the configured + // globals. + ScrapeClassicHistograms: false, + MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + HonorTimestamps: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, } // DefaultAlertmanagerConfig is the default alertmanager configuration. @@ -173,16 +175,16 @@ var ( // DefaultQueueConfig is the default remote queue configuration. DefaultQueueConfig = QueueConfig{ - // With a maximum of 200 shards, assuming an average of 100ms remote write - // time and 500 samples per batch, we will be able to push 1M samples/s. - MaxShards: 200, + // With a maximum of 50 shards, assuming an average of 100ms remote write + // time and 2000 samples per batch, we will be able to push 1M samples/s. 
+ MaxShards: 50, MinShards: 1, - MaxSamplesPerSend: 500, + MaxSamplesPerSend: 2000, - // Each shard will have a max of 2500 samples pending in its channel, plus the pending - // samples that have been enqueued. Theoretically we should only ever have about 3000 samples - // per shard pending. At 200 shards that's 600k. - Capacity: 2500, + // Each shard will have a max of 10,000 samples pending in its channel, plus the pending + // samples that have been enqueued. Theoretically we should only ever have about 12,000 samples + // per shard pending. At 50 shards that's 600k. + Capacity: 10000, BatchSendDeadline: model.Duration(5 * time.Second), // Backoff times for retrying a batch of samples on recoverable errors. @@ -194,7 +196,7 @@ var ( DefaultMetadataConfig = MetadataConfig{ Send: true, SendInterval: model.Duration(1 * time.Minute), - MaxSamplesPerSend: 500, + MaxSamplesPerSend: 2000, } // DefaultRemoteReadConfig is the default remote read configuration. @@ -266,7 +268,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { for i, scfg := range c.ScrapeConfigs { // We do these checks for library users that would not call Validate in // Unmarshal. - if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + if err := scfg.Validate(c.GlobalConfig); err != nil { return nil, err } @@ -293,7 +295,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { return nil, fileErr(filename, err) } for _, scfg := range cfg.ScrapeConfigs { - if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + if err := scfg.Validate(c.GlobalConfig); err != nil { return nil, fileErr(filename, err) } @@ -342,7 +344,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { // Do global overrides and validate unique names. jobNames := map[string]struct{}{} for _, scfg := range c.ScrapeConfigs { - if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + if err := scfg.Validate(c.GlobalConfig); err != nil { return err } @@ -389,6 +391,27 @@ type GlobalConfig struct { QueryLogFile string `yaml:"query_log_file,omitempty"` // The labels to add to any timeseries that this Prometheus instance scrapes. ExternalLabels labels.Labels `yaml:"external_labels,omitempty"` + // An uncompressed response body larger than this many bytes will cause the + // scrape to fail. 0 means no limit. + BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"` + // More than this many samples post metric-relabeling will cause the scrape to + // fail. 0 means no limit. + SampleLimit uint `yaml:"sample_limit,omitempty"` + // More than this many targets after the target relabeling will cause the + // scrapes to fail. 0 means no limit. + TargetLimit uint `yaml:"target_limit,omitempty"` + // More than this many labels post metric-relabeling will cause the scrape to + // fail. 0 means no limit. + LabelLimit uint `yaml:"label_limit,omitempty"` + // More than this label name length post metric-relabeling will cause the + // scrape to fail. 0 means no limit. + LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"` + // More than this label value length post metric-relabeling will cause the + // scrape to fail. 0 means no limit. + LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` + // Keep no more than this many dropped targets per job. + // 0 means no limit. 
+ KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -467,6 +490,8 @@ type ScrapeConfig struct { ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"` // The timeout for scraping targets of this config. ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"` + // Whether to scrape a classic histogram that is also exposed as a native histogram. + ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"` // The HTTP resource path on which to fetch metrics from targets. MetricsPath string `yaml:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. @@ -475,20 +500,26 @@ type ScrapeConfig struct { // scrape to fail. 0 means no limit. BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"` // More than this many samples post metric-relabeling will cause the scrape to - // fail. + // fail. 0 means no limit. SampleLimit uint `yaml:"sample_limit,omitempty"` // More than this many targets after the target relabeling will cause the - // scrapes to fail. + // scrapes to fail. 0 means no limit. TargetLimit uint `yaml:"target_limit,omitempty"` // More than this many labels post metric-relabeling will cause the scrape to - // fail. + // fail. 0 means no limit. LabelLimit uint `yaml:"label_limit,omitempty"` // More than this label name length post metric-relabeling will cause the - // scrape to fail. + // scrape to fail. 0 means no limit. LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"` // More than this label value length post metric-relabeling will cause the - // scrape to fail. + // scrape to fail. 0 means no limit. LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` + // More than this many buckets in a native histogram will cause the scrape to + // fail. + NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"` + // Keep no more than this many dropped targets per job. + // 0 means no limit. + KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. @@ -546,25 +577,47 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } -func (c *ScrapeConfig) Validate(defaultInterval, defaultTimeout model.Duration) error { +func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { if c == nil { return errors.New("empty or null scrape config section") } // First set the correct scrape interval, then check that the timeout // (inferred or explicit) is not greater than that. 
if c.ScrapeInterval == 0 { - c.ScrapeInterval = defaultInterval + c.ScrapeInterval = globalConfig.ScrapeInterval } if c.ScrapeTimeout > c.ScrapeInterval { return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName) } if c.ScrapeTimeout == 0 { - if defaultTimeout > c.ScrapeInterval { + if globalConfig.ScrapeTimeout > c.ScrapeInterval { c.ScrapeTimeout = c.ScrapeInterval } else { - c.ScrapeTimeout = defaultTimeout + c.ScrapeTimeout = globalConfig.ScrapeTimeout } } + if c.BodySizeLimit == 0 { + c.BodySizeLimit = globalConfig.BodySizeLimit + } + if c.SampleLimit == 0 { + c.SampleLimit = globalConfig.SampleLimit + } + if c.TargetLimit == 0 { + c.TargetLimit = globalConfig.TargetLimit + } + if c.LabelLimit == 0 { + c.LabelLimit = globalConfig.LabelLimit + } + if c.LabelNameLengthLimit == 0 { + c.LabelNameLengthLimit = globalConfig.LabelNameLengthLimit + } + if c.LabelValueLengthLimit == 0 { + c.LabelValueLengthLimit = globalConfig.LabelValueLengthLimit + } + if c.KeepDroppedTargets == 0 { + c.KeepDroppedTargets = globalConfig.KeepDroppedTargets + } + return nil } @@ -766,6 +819,7 @@ type AlertmanagerConfig struct { ServiceDiscoveryConfigs discovery.Configs `yaml:"-"` HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` + SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"` // The URL scheme to use when talking to Alertmanagers. Scheme string `yaml:"scheme,omitempty"` @@ -801,6 +855,13 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er return err } + httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil || + c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil + + if httpClientConfigAuthEnabled && c.SigV4Config != nil { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") + } + // Check for users putting URLs in target groups. if len(c.RelabelConfigs) == 0 { if err := checkStaticTargets(c.ServiceDiscoveryConfigs); err != nil { @@ -864,6 +925,7 @@ type RemoteWriteConfig struct { QueueConfig QueueConfig `yaml:"queue_config,omitempty"` MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"` SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"` + AzureADConfig *azuread.AzureADConfig `yaml:"azuread,omitempty"` } // SetDirectory joins any relative file paths with dir. 
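With the Validate signature change, every zero-valued per-scrape limit now inherits the corresponding GlobalConfig value, just as ScrapeInterval and ScrapeTimeout already did. A hedged sketch of the effective behaviour, using only exported types from this package:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	global := config.GlobalConfig{SampleLimit: 1000, LabelLimit: 64}
	sc := config.ScrapeConfig{JobName: "example"} // limits left at 0 = inherit
	if err := sc.Validate(global); err != nil {
		panic(err)
	}
	fmt.Println(sc.SampleLimit, sc.LabelLimit) // 1000 64
}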
@@ -900,8 +962,12 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil || c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil - if httpClientConfigAuthEnabled && c.SigV4Config != nil { - return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") + if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured") + } + + if c.SigV4Config != nil && c.AzureADConfig != nil { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured") } return nil @@ -922,7 +988,7 @@ func validateHeadersForTracing(headers map[string]string) error { func validateHeaders(headers map[string]string) error { for header := range headers { if strings.ToLower(header) == "authorization" { - return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter") + return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter") } if _, ok := reservedHeaders[strings.ToLower(header)]; ok { return fmt.Errorf("%s is a reserved header. It must not be changed", header) diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index 8b304a0f..4d602769 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -180,11 +180,9 @@ func (m *Manager) Providers() []*Provider { // Run starts the background processing. func (m *Manager) Run() error { go m.sender() - for range m.ctx.Done() { - m.cancelDiscoverers() - return m.ctx.Err() - } - return nil + <-m.ctx.Done() + m.cancelDiscoverers() + return m.ctx.Err() } // SyncCh returns a read only channel used by all the clients to receive target updates. diff --git a/vendor/github.com/prometheus/prometheus/discovery/registry.go b/vendor/github.com/prometheus/prometheus/discovery/registry.go index 8274628c..13168a07 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/registry.go +++ b/vendor/github.com/prometheus/prometheus/discovery/registry.go @@ -253,7 +253,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { oldStr := oldTyp.String() newStr := newTyp.String() for i, s := range e.Errors { - e.Errors[i] = strings.Replace(s, oldStr, newStr, -1) + e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr) } } return err diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go index cd73083b..41873278 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -15,6 +15,7 @@ package histogram import ( "fmt" + "math" "strings" ) @@ -93,26 +94,8 @@ func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { Sum: h.Sum, } - // TODO(beorn7): This is a straight-forward implementation using merging - // iterators for the original buckets and then adding one merged bucket - // after another to the newly created FloatHistogram. 
It's well possible - // that a more involved implementation performs much better, which we - // could do if this code path turns out to be performance-critical. - var iInSpan, index int32 - for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(true, 0, targetSchema); it.Next(); { - b := it.At() - c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( - b, c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } - for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(false, 0, targetSchema); it.Next(); { - b := it.At() - c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( - b, c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } + c.PositiveSpans, c.PositiveBuckets = mergeToSchema(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema) + c.NegativeSpans, c.NegativeBuckets = mergeToSchema(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema) return &c } @@ -148,6 +131,55 @@ func (h *FloatHistogram) String() string { return sb.String() } +// TestExpression returns the string representation of this histogram as it is used in the internal PromQL testing +// framework as well as in promtool rules unit tests. +// The syntax is described in https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series +func (h *FloatHistogram) TestExpression() string { + var res []string + m := h.Copy() + + m.Compact(math.MaxInt) // Compact to reduce the number of positive and negative spans to 1. + + if m.Schema != 0 { + res = append(res, fmt.Sprintf("schema:%d", m.Schema)) + } + if m.Count != 0 { + res = append(res, fmt.Sprintf("count:%g", m.Count)) + } + if m.Sum != 0 { + res = append(res, fmt.Sprintf("sum:%g", m.Sum)) + } + if m.ZeroCount != 0 { + res = append(res, fmt.Sprintf("z_bucket:%g", m.ZeroCount)) + } + if m.ZeroThreshold != 0 { + res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold)) + } + + addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { + if len(spans) > 1 { + panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind)) + } + for _, span := range spans { + if span.Offset != 0 { + res = append(res, fmt.Sprintf("%s:%d", offsetKey, span.Offset)) + } + } + + var bucketStr []string + for _, bucket := range buckets { + bucketStr = append(bucketStr, fmt.Sprintf("%g", bucket)) + } + if len(bucketStr) > 0 { + res = append(res, fmt.Sprintf("%s:[%s]", bucketsKey, strings.Join(bucketStr, " "))) + } + return res + } + res = addBuckets("positive", "buckets", "offset", m.PositiveBuckets, m.PositiveSpans) + res = addBuckets("negative", "n_buckets", "n_offset", m.NegativeBuckets, m.NegativeSpans) + return "{{" + strings.Join(res, " ") + "}}" +} + // ZeroBucket returns the zero bucket. func (h *FloatHistogram) ZeroBucket() Bucket[float64] { return Bucket[float64]{ @@ -159,12 +191,12 @@ func (h *FloatHistogram) ZeroBucket() Bucket[float64] { } } -// Scale scales the FloatHistogram by the provided factor, i.e. it scales all +// Mul multiplies the FloatHistogram by the provided factor, i.e. it scales all // bucket counts including the zero bucket and the count and the sum of // observations. The bucket layout stays the same. This method changes the // receiving histogram directly (rather than acting on a copy). It returns a // pointer to the receiving histogram for convenience. 
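TestExpression renders the {{...}} histogram literal used by the PromQL testing framework and promtool rule unit tests; zero-valued components are omitted. A small sketch:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Count:           3,
		Sum:             5,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 2},
	}
	// Schema, zero bucket, and negative buckets are zero-valued and omitted:
	// {{count:3 sum:5 buckets:[1 2]}}
	fmt.Println(h.TestExpression())
}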
-func (h *FloatHistogram) Scale(factor float64) *FloatHistogram { +func (h *FloatHistogram) Mul(factor float64) *FloatHistogram { h.ZeroCount *= factor h.Count *= factor h.Sum *= factor @@ -177,6 +209,21 @@ func (h *FloatHistogram) Scale(factor float64) *FloatHistogram { return h } +// Div works like Mul but divides instead of multiplies. +// When dividing by 0, everything will be set to Inf. +func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { + h.ZeroCount /= scalar + h.Count /= scalar + h.Sum /= scalar + for i := range h.PositiveBuckets { + h.PositiveBuckets[i] /= scalar + } + for i := range h.NegativeBuckets { + h.NegativeBuckets[i] /= scalar + } + return h +} + // Add adds the provided other histogram to the receiving histogram. Count, Sum, // and buckets from the other histogram are added to the corresponding // components of the receiving histogram. Buckets in the other histogram that do @@ -221,23 +268,17 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { h.Count += other.Count h.Sum += other.Sum - // TODO(beorn7): If needed, this can be optimized by inspecting the - // spans in other and create missing buckets in h in batches. - var iInSpan, index int32 - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index + otherPositiveSpans := other.PositiveSpans + otherPositiveBuckets := other.PositiveBuckets + otherNegativeSpans := other.NegativeSpans + otherNegativeBuckets := other.NegativeBuckets + if other.Schema != h.Schema { + otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema) + otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema) } + + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -248,25 +289,17 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { h.Count -= other.Count h.Sum -= other.Sum - // TODO(beorn7): If needed, this can be optimized by inspecting the - // spans in other and create missing buckets in h in batches. 
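Mul (the renamed Scale) and the new Div mutate the receiver in place and return it, so calls chain; as the comment above notes, dividing by zero yields Inf values rather than panicking. For example:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Count:           30,
		Sum:             100,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{10, 20},
	}
	h.Mul(3).Div(2) // all counts and the sum scale by 3/2
	fmt.Println(h.Count, h.Sum, h.PositiveBuckets) // 45 150 [15 30]
}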
- var iInSpan, index int32 - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - b.Count *= -1 - h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - b.Count *= -1 - h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index + otherPositiveSpans := other.PositiveSpans + otherPositiveBuckets := other.PositiveBuckets + otherNegativeSpans := other.NegativeSpans + otherNegativeBuckets := other.NegativeBuckets + if other.Schema != h.Schema { + otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema) + otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema) } + + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -301,103 +334,6 @@ func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { return true } -// addBucket takes the "coordinates" of the last bucket that was handled and -// adds the provided bucket after it. If a corresponding bucket exists, the -// count is added. If not, the bucket is inserted. The updated slices and the -// coordinates of the inserted or added-to bucket are returned. -func addBucket( - b Bucket[float64], - spans []Span, buckets []float64, - iSpan, iBucket int, - iInSpan, index int32, -) ( - newSpans []Span, newBuckets []float64, - newISpan, newIBucket int, newIInSpan int32, -) { - if iSpan == -1 { - // First add, check if it is before all spans. - if len(spans) == 0 || spans[0].Offset > b.Index { - // Add bucket before all others. - buckets = append(buckets, 0) - copy(buckets[1:], buckets) - buckets[0] = b.Count - if len(spans) > 0 && spans[0].Offset == b.Index+1 { - spans[0].Length++ - spans[0].Offset-- - return spans, buckets, 0, 0, 0 - } - spans = append(spans, Span{}) - copy(spans[1:], spans) - spans[0] = Span{Offset: b.Index, Length: 1} - if len(spans) > 1 { - // Convert the absolute offset in the formerly - // first span to a relative offset. - spans[1].Offset -= b.Index + 1 - } - return spans, buckets, 0, 0, 0 - } - if spans[0].Offset == b.Index { - // Just add to first bucket. - buckets[0] += b.Count - return spans, buckets, 0, 0, 0 - } - // We are behind the first bucket, so set everything to the - // first bucket and continue normally. - iSpan, iBucket, iInSpan = 0, 0, 0 - index = spans[0].Offset - } - deltaIndex := b.Index - index - for { - remainingInSpan := int32(spans[iSpan].Length) - iInSpan - if deltaIndex < remainingInSpan { - // Bucket is in current span. - iBucket += int(deltaIndex) - iInSpan += deltaIndex - buckets[iBucket] += b.Count - return spans, buckets, iSpan, iBucket, iInSpan - } - deltaIndex -= remainingInSpan - iBucket += int(remainingInSpan) - iSpan++ - if iSpan == len(spans) || deltaIndex < spans[iSpan].Offset { - // Bucket is in gap behind previous span (or there are no further spans). 
- buckets = append(buckets, 0) - copy(buckets[iBucket+1:], buckets[iBucket:]) - buckets[iBucket] = b.Count - if deltaIndex == 0 { - // Directly after previous span, extend previous span. - if iSpan < len(spans) { - spans[iSpan].Offset-- - } - iSpan-- - iInSpan = int32(spans[iSpan].Length) - spans[iSpan].Length++ - return spans, buckets, iSpan, iBucket, iInSpan - } - if iSpan < len(spans) && deltaIndex == spans[iSpan].Offset-1 { - // Directly before next span, extend next span. - iInSpan = 0 - spans[iSpan].Offset-- - spans[iSpan].Length++ - return spans, buckets, iSpan, iBucket, iInSpan - } - // No next span, or next span is not directly adjacent to new bucket. - // Add new span. - iInSpan = 0 - if iSpan < len(spans) { - spans[iSpan].Offset -= deltaIndex + 1 - } - spans = append(spans, Span{}) - copy(spans[iSpan+1:], spans[iSpan:]) - spans[iSpan] = Span{Length: 1, Offset: deltaIndex} - return spans, buckets, iSpan, iBucket, iInSpan - } - // Try start of next span. - deltaIndex -= spans[iSpan].Offset - iInSpan = 0 - } -} - // Compact eliminates empty buckets at the beginning and end of each span, then // merges spans that are consecutive or at most maxEmptyBuckets apart, and // finally splits spans that contain more consecutive empty buckets than @@ -406,7 +342,7 @@ func addBucket( // receiving histogram, but a pointer to it is returned for convenience. // // The ideal value for maxEmptyBuckets depends on circumstances. The motivation -// to set maxEmptyBuckets > 0 is the assumption that is is less overhead to +// to set maxEmptyBuckets > 0 is the assumption that it is less overhead to // represent very few empty buckets explicitly within one span than cutting the // one span into two to treat the empty buckets as a gap between the two spans, // both in terms of storage requirement as well as in terms of encoding and @@ -600,10 +536,24 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] // set to the zero threshold. func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { return &allFloatBucketIterator{ - h: h, - negIter: h.NegativeReverseBucketIterator(), - posIter: h.PositiveBucketIterator(), - state: -1, + h: h, + leftIter: h.NegativeReverseBucketIterator(), + rightIter: h.PositiveBucketIterator(), + state: -1, + } +} + +// AllReverseBucketIterator returns a BucketIterator to iterate over all negative, +// zero, and positive buckets in descending order (starting at the highest bucket +// and going down). If the highest negative bucket or the lowest positive bucket +// overlap with the zero bucket, their upper or lower boundary, respectively, is +// set to the zero threshold. +func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { + return &allFloatBucketIterator{ + h: h, + leftIter: h.PositiveReverseBucketIterator(), + rightIter: h.NegativeBucketIterator(), + state: -1, + } } @@ -789,6 +739,11 @@ type floatBucketIterator struct { absoluteStartValue float64 // Never return buckets with an upper bound ≤ this value. } +func (i *floatBucketIterator) At() Bucket[float64] { + // Need to use i.targetSchema rather than i.baseBucketIterator.schema.
+ return i.baseBucketIterator.at(i.targetSchema) +} + func (i *floatBucketIterator) Next() bool { if i.spansIdx >= len(i.spans) { return false @@ -824,10 +779,11 @@ mergeLoop: // Merge together all buckets from the original schema that fall into origIdx += span.Offset } currIdx := i.targetIdx(origIdx) - if firstPass { + switch { + case firstPass: i.currIdx = currIdx firstPass = false - } else if currIdx != i.currIdx { + case currIdx != i.currIdx: // Reached next bucket in targetSchema. // Do not actually forward to the next bucket, but break out. break mergeLoop @@ -887,8 +843,8 @@ func (i *reverseFloatBucketIterator) Next() bool { } type allFloatBucketIterator struct { - h *FloatHistogram - negIter, posIter BucketIterator[float64] + h *FloatHistogram + leftIter, rightIter BucketIterator[float64] // -1 means we are iterating negative buckets. // 0 means it is time for the zero bucket. // 1 means we are iterating positive buckets. @@ -900,10 +856,13 @@ type allFloatBucketIterator struct { func (i *allFloatBucketIterator) Next() bool { switch i.state { case -1: - if i.negIter.Next() { - i.currBucket = i.negIter.At() - if i.currBucket.Upper > -i.h.ZeroThreshold { + if i.leftIter.Next() { + i.currBucket = i.leftIter.At() + switch { + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: i.currBucket.Upper = -i.h.ZeroThreshold + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: + i.currBucket.Lower = i.h.ZeroThreshold } return true } @@ -924,10 +883,13 @@ func (i *allFloatBucketIterator) Next() bool { } return i.Next() case 1: - if i.posIter.Next() { - i.currBucket = i.posIter.At() - if i.currBucket.Lower < i.h.ZeroThreshold { + if i.rightIter.Next() { + i.currBucket = i.rightIter.At() + switch { + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: i.currBucket.Lower = i.h.ZeroThreshold + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: + i.currBucket.Upper = -i.h.ZeroThreshold } return true } @@ -941,3 +903,202 @@ func (i *allFloatBucketIterator) Next() bool { func (i *allFloatBucketIterator) At() Bucket[float64] { return i.currBucket } + +// targetIdx returns the bucket index in the target schema for the given bucket +// index idx in the original schema. +func targetIdx(idx, originSchema, targetSchema int32) int32 { + return ((idx - 1) >> (originSchema - targetSchema)) + 1 +} + +// mergeToSchema is used to merge a FloatHistogram's Spans and Buckets (no matter if +// positive or negative) from the original schema to the target schema. +// The target schema must be smaller than the original schema. +func mergeToSchema(originSpans []Span, originBuckets []float64, originSchema, targetSchema int32) ([]Span, []float64) { + var ( + targetSpans []Span // The spans in the target schema. + targetBuckets []float64 // The buckets in the target schema. + bucketIdx int32 // The index of bucket in the origin schema. + lastTargetBucketIdx int32 // The index of the last added target bucket. + origBucketIdx int // The position of a bucket in originBuckets slice. + ) + + for _, span := range originSpans { + // Determine the index of the first bucket in this span. + bucketIdx += span.Offset + for j := 0; j < int(span.Length); j++ { + // Determine the index of the bucket in the target schema from the index in the original schema. + targetBucketIdx := targetIdx(bucketIdx, originSchema, targetSchema) + + switch { + case len(targetSpans) == 0: + // This is the first span in the targetSpans. 
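+ // (Illustrative aside: targetIdx above collapses 2^(originSchema-targetSchema)
+ // adjacent origin buckets into one target bucket. Merging schema 1 down to
+ // schema 0, for example, origin indices 1 and 2 both map to target index 1,
+ // since ((1-1)>>1)+1 == ((2-1)>>1)+1 == 1.)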
+ span := Span{ + Offset: targetBucketIdx, + Length: 1, + } + targetSpans = append(targetSpans, span) + targetBuckets = append(targetBuckets, originBuckets[0]) + lastTargetBucketIdx = targetBucketIdx + + case lastTargetBucketIdx == targetBucketIdx: + // The current bucket has to be merged into the same target bucket as the previous bucket. + targetBuckets[len(targetBuckets)-1] += originBuckets[origBucketIdx] + + case (lastTargetBucketIdx + 1) == targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is next to the previous target bucket, + // so we add it to the current target span. + targetSpans[len(targetSpans)-1].Length++ + targetBuckets = append(targetBuckets, originBuckets[origBucketIdx]) + lastTargetBucketIdx++ + + case (lastTargetBucketIdx + 1) < targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is separated by a gap from the previous target bucket, + // so we need to add a new target span. + span := Span{ + Offset: targetBucketIdx - lastTargetBucketIdx - 1, + Length: 1, + } + targetSpans = append(targetSpans, span) + targetBuckets = append(targetBuckets, originBuckets[origBucketIdx]) + lastTargetBucketIdx = targetBucketIdx + } + + bucketIdx++ + origBucketIdx++ + } + } + + return targetSpans, targetBuckets +} + +// addBuckets adds the buckets described by spansB/bucketsB to the buckets described by spansA/bucketsA, +// creating missing buckets in spansA/bucketsA as needed. +// It returns the resulting spans/buckets (which must be used instead of the original spansA/bucketsA, +// although spansA/bucketsA might get modified by this function). +// All buckets must use the same provided schema. +// Buckets in spansB/bucketsB with an absolute upper limit ≤ threshold are ignored. +// If negative is true, the buckets in spansB/bucketsB are subtracted rather than added. +func addBuckets( + schema int32, threshold float64, negative bool, + spansA []Span, bucketsA []float64, + spansB []Span, bucketsB []float64, +) ([]Span, []float64) { + var ( + iSpan int = -1 + iBucket int = -1 + iInSpan int32 + indexA int32 + indexB int32 + bIdxB int + bucketB float64 + deltaIndex int32 + lowerThanThreshold = true + ) + + for _, spanB := range spansB { + indexB += spanB.Offset + for j := 0; j < int(spanB.Length); j++ { + if lowerThanThreshold && getBound(indexB, schema) <= threshold { + goto nextLoop + } + lowerThanThreshold = false + + bucketB = bucketsB[bIdxB] + if negative { + bucketB *= -1 + } + + if iSpan == -1 { + if len(spansA) == 0 || spansA[0].Offset > indexB { + // Add bucket before all others. + bucketsA = append(bucketsA, 0) + copy(bucketsA[1:], bucketsA) + bucketsA[0] = bucketB + if len(spansA) > 0 && spansA[0].Offset == indexB+1 { + spansA[0].Length++ + spansA[0].Offset-- + goto nextLoop + } else { + spansA = append(spansA, Span{}) + copy(spansA[1:], spansA) + spansA[0] = Span{Offset: indexB, Length: 1} + if len(spansA) > 1 { + // Convert the absolute offset in the formerly + // first span to a relative offset. + spansA[1].Offset -= indexB + 1 + } + goto nextLoop + } + } else if spansA[0].Offset == indexB { + // Just add to first bucket. + bucketsA[0] += bucketB + goto nextLoop + } + iSpan, iBucket, iInSpan = 0, 0, 0 + indexA = spansA[0].Offset + } + deltaIndex = indexB - indexA + for { + remainingInSpan := int32(spansA[iSpan].Length) - iInSpan + if deltaIndex < remainingInSpan { + // Bucket is in current span. 
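+ // (Illustrative aside: with iInSpan == 0 and a span of length 3, a
+ // deltaIndex of 2 stays below remainingInSpan == 3, so the new count is
+ // simply added to the existing bucket at bucketsA[iBucket+2].)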
+ iBucket += int(deltaIndex) + iInSpan += deltaIndex + bucketsA[iBucket] += bucketB + break + } else { + deltaIndex -= remainingInSpan + iBucket += int(remainingInSpan) + iSpan++ + if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset { + // Bucket is in gap behind previous span (or there are no further spans). + bucketsA = append(bucketsA, 0) + copy(bucketsA[iBucket+1:], bucketsA[iBucket:]) + bucketsA[iBucket] = bucketB + switch { + case deltaIndex == 0: + // Directly after previous span, extend previous span. + if iSpan < len(spansA) { + spansA[iSpan].Offset-- + } + iSpan-- + iInSpan = int32(spansA[iSpan].Length) + spansA[iSpan].Length++ + goto nextLoop + case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1: + // Directly before next span, extend next span. + iInSpan = 0 + spansA[iSpan].Offset-- + spansA[iSpan].Length++ + goto nextLoop + default: + // No next span, or next span is not directly adjacent to new bucket. + // Add new span. + iInSpan = 0 + if iSpan < len(spansA) { + spansA[iSpan].Offset -= deltaIndex + 1 + } + spansA = append(spansA, Span{}) + copy(spansA[iSpan+1:], spansA[iSpan:]) + spansA[iSpan] = Span{Length: 1, Offset: deltaIndex} + goto nextLoop + } + } else { + // Try start of next span. + deltaIndex -= spansA[iSpan].Offset + iInSpan = 0 + } + } + } + + nextLoop: + indexA = indexB + indexB++ + bIdxB++ + } + } + + return spansA, bucketsA +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go index e1de5ffb..dad54cb0 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go @@ -102,16 +102,22 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct { } func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] { + return b.at(b.schema) +} + +// at is an internal version of the exported At to enable using a different +// schema. +func (b baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] { bucket := Bucket[BC]{ Count: BC(b.currCount), Index: b.currIdx, } if b.positive { - bucket.Upper = getBound(b.currIdx, b.schema) - bucket.Lower = getBound(b.currIdx-1, b.schema) + bucket.Upper = getBound(b.currIdx, schema) + bucket.Lower = getBound(b.currIdx-1, schema) } else { - bucket.Lower = -getBound(b.currIdx, b.schema) - bucket.Upper = -getBound(b.currIdx-1, b.schema) + bucket.Lower = -getBound(b.currIdx, schema) + bucket.Upper = -getBound(b.currIdx-1, schema) } bucket.LowerInclusive = bucket.Lower < 0 bucket.UpperInclusive = bucket.Upper > 0 diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index 8fc40156..3dc3049b 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -19,6 +19,7 @@ import ( "bytes" "encoding/json" "strconv" + "strings" "github.com/cespare/xxhash/v2" "github.com/prometheus/common/model" @@ -169,11 +170,12 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { b = b[:0] i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: b = append(b, ls[i].Name...) b = append(b, seps[0]) b = append(b, ls[i].Value...) 
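The switch-based rewrite of HashForLabels above is the classic two-pointer walk over two name-sorted lists. A self-contained sketch of the pattern, using a hypothetical helper that is not part of the diff:

// intersectSorted returns the strings present in both sorted slices.
func intersectSorted(a, b []string) []string {
	var out []string
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case b[j] < a[i]:
			j++ // b is behind; advance it.
		case a[i] < b[j]:
			i++ // a is behind; advance it.
		default:
			out = append(out, a[i]) // names match: keep and advance both.
			i++
			j++
		}
	}
	return out
}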
@@ -213,11 +215,12 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { b.WriteByte(labelSep) i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: if b.Len() > 1 { b.WriteByte(seps[0]) } @@ -360,7 +363,7 @@ func EmptyLabels() Labels { func New(ls ...Label) Labels { set := make(Labels, 0, len(ls)) set = append(set, ls...) - slices.SortFunc(set, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(set, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) return set } @@ -384,7 +387,7 @@ func FromStrings(ss ...string) Labels { res = append(res, Label{Name: ss[i], Value: ss[i+1]}) } - slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) return res } @@ -531,16 +534,15 @@ func (b *Builder) Set(n, v string) *Builder { } func (b *Builder) Get(n string) string { - for _, d := range b.del { - if d == n { - return "" - } - } + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. for _, a := range b.add { if a.Name == n { return a.Value } } + if slices.Contains(b.del, n) { + return "" + } return b.base.Get(n) } @@ -570,24 +572,18 @@ func contains(s []Label, n string) bool { return false } -// Labels returns the labels from the builder, adding them to res if non-nil. -// Argument res can be the same as b.base, if caller wants to overwrite that slice. +// Labels returns the labels from the builder. // If no modifications were made, the original labels are returned. -func (b *Builder) Labels(res Labels) Labels { +func (b *Builder) Labels() Labels { if len(b.del) == 0 && len(b.add) == 0 { return b.base } - if res == nil { - // In the general case, labels are removed, modified or moved - // rather than added. - res = make(Labels, 0, len(b.base)) - } else { - res = res[:0] + expectedSize := len(b.base) + len(b.add) - len(b.del) + if expectedSize < 1 { + expectedSize = 1 } - // Justification that res can be the same slice as base: in this loop - // we move forward through base, and either skip an element or assign - // it to res at its current position or an earlier position. + res := make(Labels, 0, expectedSize) for _, l := range b.base { if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) { continue @@ -596,7 +592,7 @@ func (b *Builder) Labels(res Labels) Labels { } if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it. res = append(res, b.add...) - slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) } return res } @@ -623,10 +619,10 @@ func (b *ScratchBuilder) Add(name, value string) { // Sort the labels added so far by name. func (b *ScratchBuilder) Sort() { - slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) } -// Asssign is for when you already have a Labels which you want this ScratchBuilder to return. +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. func (b *ScratchBuilder) Assign(ls Labels) { b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. 
} @@ -637,3 +633,9 @@ func (b *ScratchBuilder) Labels() Labels { // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. return append([]Label{}, b.add...) } + +// Write the newly-built Labels out to ls. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + *ls = append((*ls)[:0], b.add...) +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go similarity index 86% rename from vendor/github.com/prometheus/prometheus/model/labels/labels_string.go rename to vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go index 560b643d..cc6bfcc7 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go @@ -20,6 +20,7 @@ import ( "encoding/json" "reflect" "strconv" + "strings" "unsafe" "github.com/cespare/xxhash/v2" @@ -49,15 +50,15 @@ type Labels struct { data string } -type labelSlice []Label - -func (ls labelSlice) Len() int { return len(ls) } -func (ls labelSlice) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } -func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name } - func decodeSize(data string, index int) (int, int) { - var size int - for shift := uint(0); ; shift += 7 { + // Fast-path for common case of a single byte, value 0..127. + b := data[index] + index++ + if b < 0x80 { + return int(b), index + } + size := int(b & 0x7F) + for shift := uint(7); ; shift += 7 { // Just panic if we go off the end of data, since all Labels strings are constructed internally and // malformed data indicates a bug, or memory corruption. b := data[index] @@ -158,7 +159,7 @@ func (ls Labels) MatchLabels(on bool, names ...string) Labels { b.Del(MetricName) b.Del(names...) } - return b.Labels(EmptyLabels()) + return b.Labels() } // Hash returns a hash value for the label set. @@ -267,26 +268,53 @@ func (ls Labels) Copy() Labels { // Get returns the value for the label with the given name. // Returns an empty string if the label doesn't exist. func (ls Labels) Get(name string) string { + if name == "" { // Avoid crash in loop if someone asks for "". + return "" // Prometheus does not store blank label names. + } for i := 0; i < len(ls.data); { - var lName, lValue string - lName, i = decodeString(ls.data, i) - lValue, i = decodeString(ls.data, i) - if lName == name { - return lValue + var size int + size, i = decodeSize(ls.data, i) + if ls.data[i] == name[0] { + lName := ls.data[i : i+size] + i += size + if lName == name { + lValue, _ := decodeString(ls.data, i) + return lValue + } + } else { + if ls.data[i] > name[0] { // Stop looking if we've gone past. + break + } + i += size } + size, i = decodeSize(ls.data, i) + i += size } return "" } // Has returns true if the label with the given name is present. func (ls Labels) Has(name string) bool { + if name == "" { // Avoid crash in loop if someone asks for "". + return false // Prometheus does not store blank label names.
+ } for i := 0; i < len(ls.data); { - var lName string - lName, i = decodeString(ls.data, i) - _, i = decodeString(ls.data, i) - if lName == name { - return true + var size int + size, i = decodeSize(ls.data, i) + if ls.data[i] == name[0] { + lName := ls.data[i : i+size] + i += size + if lName == name { + return true + } + } else { + if ls.data[i] > name[0] { // Stop looking if we've gone past. + break + } + i += size } + size, i = decodeSize(ls.data, i) + i += size } return false } @@ -385,7 +413,7 @@ func yoloBytes(s string) (b []byte) { // New returns a sorted Labels from the given labels. // The caller has to guarantee that all label names are unique. func New(ls ...Label) Labels { - slices.SortFunc(ls, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) size := labelsSize(ls) buf := make([]byte, size) marshalLabelsToSizedBuffer(ls, buf) @@ -416,37 +444,49 @@ func FromStrings(ss ...string) Labels { // Compare compares the two label sets. // The result will be 0 if a==b, <0 if a < b, and >0 if a > b. -// TODO: replace with Less function - Compare is never needed. -// TODO: just compare the underlying strings when we don't need alphanumeric sorting. func Compare(a, b Labels) int { - l := len(a.data) - if len(b.data) < l { - l = len(b.data) - } - - ia, ib := 0, 0 - for ia < l { - var aName, bName string - aName, ia = decodeString(a.data, ia) - bName, ib = decodeString(b.data, ib) - if aName != bName { - if aName < bName { - return -1 - } - return 1 - } - var aValue, bValue string - aValue, ia = decodeString(a.data, ia) - bValue, ib = decodeString(b.data, ib) - if aValue != bValue { - if aValue < bValue { - return -1 - } - return 1 + // Find the first byte in the string where a and b differ. + shorter, longer := a.data, b.data + if len(b.data) < len(a.data) { + shorter, longer = b.data, a.data + } + i := 0 + // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. + sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) + lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + for ; i < len(shorter)-8; i += 8 { + if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { + break } } - // If all labels so far were in common, the set with fewer labels comes first. - return len(a.data) - len(b.data) + // Now go 1 byte at a time. + for ; i < len(shorter); i++ { + if shorter[i] != longer[i] { + break + } + } + if i == len(shorter) { + // One Labels was a prefix of the other; the set with fewer labels compares lower. + return len(a.data) - len(b.data) + } + + // Now we know that there is some difference before the end of a and b. + // Go back through the fields and find which field that difference is in. + firstCharDifferent := i + for i = 0; ; { + size, nextI := decodeSize(a.data, i) + if nextI+size > firstCharDifferent { + break + } + i = nextI + size + } + // Difference is inside this entry. + aStr, _ := decodeString(a.data, i) + bStr, _ := decodeString(b.data, i) + if aStr < bStr { + return -1 + } + return +1 } // Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. @@ -587,14 +627,15 @@ func (b *Builder) Set(n, v string) *Builder { } func (b *Builder) Get(n string) string { - if slices.Contains(b.del, n) { - return "" - } + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. 
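+ // (Illustrative aside: after b.Del("job") followed by b.Set("job", "x"),
+ // "job" is still listed in b.del, but Get("job") must return "x", which
+ // is why .add is consulted before .del.)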
for _, a := range b.add { if a.Name == n { return a.Value } } + if slices.Contains(b.del, n) { + return "" + } return b.base.Get(n) } @@ -624,20 +665,19 @@ func contains(s []Label, n string) bool { return false } -// Labels returns the labels from the builder, adding them to res if non-nil. -// Argument res can be the same as b.base, if caller wants to overwrite that slice. +// Labels returns the labels from the builder. // If no modifications were made, the original labels are returned. -func (b *Builder) Labels(res Labels) Labels { +func (b *Builder) Labels() Labels { if len(b.del) == 0 && len(b.add) == 0 { return b.base } - slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) slices.Sort(b.del) a, d := 0, 0 bufSize := len(b.base.data) + labelsSize(b.add) - buf := make([]byte, 0, bufSize) // TODO: see if we can re-use the buffer from res. + buf := make([]byte, 0, bufSize) for pos := 0; pos < len(b.base.data); { oldPos := pos var lName string @@ -791,10 +831,10 @@ func (b *ScratchBuilder) Add(name, value string) { // Sort the labels added so far by name. func (b *ScratchBuilder) Sort() { - slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) } -// Asssign is for when you already have a Labels which you want this ScratchBuilder to return. +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. func (b *ScratchBuilder) Assign(l Labels) { b.output = l } @@ -812,7 +852,7 @@ func (b *ScratchBuilder) Labels() Labels { } // Write the newly-built Labels out to ls, reusing an internal buffer. -// Callers must ensure that there are no other references to ls. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. func (b *ScratchBuilder) Overwrite(ls *Labels) { size := labelsSize(b.add) if size <= cap(b.overwriteBuffer) { diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go index e09a6377..14319c7f 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go @@ -25,9 +25,16 @@ type FastRegexMatcher struct { prefix string suffix string contains string + + // shortcut for literals + literal bool + value string } func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { + if isLiteral(v) { + return &FastRegexMatcher{literal: true, value: v}, nil + } re, err := regexp.Compile("^(?:" + v + ")$") if err != nil { return nil, err @@ -50,6 +57,9 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { } func (m *FastRegexMatcher) MatchString(s string) bool { + if m.literal { + return s == m.value + } if m.prefix != "" && !strings.HasPrefix(s, m.prefix) { return false } @@ -63,9 +73,16 @@ func (m *FastRegexMatcher) MatchString(s string) bool { } func (m *FastRegexMatcher) GetRegexString() string { + if m.literal { + return m.value + } return m.re.String() } +func isLiteral(re string) bool { + return regexp.QuoteMeta(re) == re +} + // optimizeConcatRegex returns literal prefix/suffix text that can be safely // checked against the label value before running the regexp matcher. 
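// For example, ^(?:foo.*bar)$ yields prefix "foo" and suffix "bar", so most
// non-matching values are rejected by two cheap string checks before the
// regexp runs at all. (The new literal shortcut above goes one step further:
// when regexp.QuoteMeta(v) == v, MatchString degenerates to string equality.)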
func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) { diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index 5ef79b4a..fadf35b8 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -202,16 +202,18 @@ func (re Regexp) String() string { return str[4 : len(str)-2] } -// Process returns a relabeled copy of the given label set. The relabel configurations +// Process returns a relabeled version of the given label set. The relabel configurations // are applied in order of input. +// There are circumstances where Process will modify the input label. +// If you want to avoid issues with the input label set being modified, at the cost of +// higher memory usage, you can use lbls.Copy(). // If a label set is dropped, EmptyLabels and false is returned. -// May return the input labelSet modified. func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) { lb := labels.NewBuilder(lbls) if !ProcessBuilder(lb, cfgs...) { return labels.EmptyLabels(), false } - return lb.Labels(lbls), true + return lb.Labels(), true } // ProcessBuilder is like Process, but the caller passes a labels.Builder diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go index 30b3face..03cbd884 100644 --- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go @@ -173,17 +173,11 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { }) } if r.Record.Value == "" && r.Alert.Value == "" { - if r.Record.Value == "0" { - nodes = append(nodes, WrappedError{ - err: fmt.Errorf("one of 'record' or 'alert' must be set"), - node: &r.Alert, - }) - } else { - nodes = append(nodes, WrappedError{ - err: fmt.Errorf("one of 'record' or 'alert' must be set"), - node: &r.Record, - }) - } + nodes = append(nodes, WrappedError{ + err: fmt.Errorf("one of 'record' or 'alert' must be set"), + node: &r.Record, + nodeAlt: &r.Alert, + }) } if r.Expr.Value == "" { diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go index 9efd942e..38903afc 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go @@ -59,7 +59,9 @@ type Parser interface { Metric(l *labels.Labels) string // Exemplar writes the exemplar of the current sample into the passed - // exemplar. It returns if an exemplar exists or not. + // exemplar. It can be called repeatedly to retrieve multiple exemplars + // for the same sample. It returns false once all exemplars are + // retrieved (including the case where no exemplars exist at all). Exemplar(l *exemplar.Exemplar) bool // Next advances the parser to the next sample. It returns false if no @@ -71,7 +73,7 @@ type Parser interface { // // This function always returns a valid parser, but might additionally // return an error if the content type cannot be parsed. 
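A hedged usage sketch of the updated New signature below (the consume helper is hypothetical; assumes the standard io package plus this textparse package):

func consume(buf []byte, contentType string) error {
	// false: do not additionally expose classic series for native histograms.
	p, err := textparse.New(buf, contentType, false)
	if err != nil {
		return err // the parser is still valid; the error only flags the content type
	}
	for {
		entry, err := p.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		_ = entry // dispatch on EntrySeries, EntryHistogram, etc.
	}
}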
-func New(b []byte, contentType string) (Parser, error) { +func New(b []byte, contentType string, parseClassicHistograms bool) (Parser, error) { if contentType == "" { return NewPromParser(b), nil } @@ -84,7 +86,7 @@ func New(b []byte, contentType string) (Parser, error) { case "application/openmetrics-text": return NewOpenMetricsParser(b), nil case "application/vnd.google.protobuf": - return NewProtobufParser(b), nil + return NewProtobufParser(b, parseClassicHistograms), nil default: return NewPromParser(b), nil } @@ -100,7 +102,7 @@ const ( EntrySeries Entry = 2 // A series with a simple float64 as value. EntryComment Entry = 3 EntryUnit Entry = 4 - EntryHistogram Entry = 5 // A series with a sparse histogram as a value. + EntryHistogram Entry = 5 // A series with a native histogram as a value. ) // MetricType represents metric type values. diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index c17d4002..5623e683 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -174,8 +174,10 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { return s } -// Exemplar writes the exemplar of the current sample into the passed -// exemplar. It returns the whether an exemplar exists. +// Exemplar writes the exemplar of the current sample into the passed exemplar. +// It returns whether an exemplar exists. As OpenMetrics only ever has one +// exemplar per sample, every call after the first (for the same sample) will +// always return false. func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { if len(p.exemplar) == 0 { return false @@ -204,6 +206,8 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { p.builder.Sort() e.Labels = p.builder.Labels() + // Wipe exemplar so that future calls return false. + p.exemplar = p.exemplar[:0] return true } @@ -334,7 +338,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { var ts float64 // A float is enough to hold what we need for millisecond resolution. if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) + return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts) @@ -387,7 +391,7 @@ func (p *OpenMetricsParser) parseComment() error { var ts float64 // A float is enough to hold what we need for millisecond resolution. if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) + return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { return fmt.Errorf("invalid exemplar timestamp %f", ts) @@ -457,7 +461,7 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error } val, err := parseFloat(yoloString(p.l.buf()[1:])) if err != nil { - return 0, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) + return 0, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } // Ensure canonical NaN value. 
if math.IsNaN(p.exemplarVal) { diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index 2c981f05..04c295dd 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -238,9 +238,10 @@ func (p *PromParser) Metric(l *labels.Labels) string { return s } -// Exemplar writes the exemplar of the current sample into the passed -// exemplar. It returns if an exemplar exists. -func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool { +// Exemplar implements the Parser interface. However, since the classic +// Prometheus text format does not support exemplars, this implementation simply +// returns false and does nothing else. +func (p *PromParser) Exemplar(*exemplar.Exemplar) bool { return false } @@ -347,7 +348,7 @@ func (p *PromParser) Next() (Entry, error) { return EntryInvalid, p.parseError("expected value after metric", t2) } if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { - return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) + return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } // Ensure canonical NaN value. if math.IsNaN(p.val) { @@ -360,7 +361,7 @@ case tTimestamp: p.hasTS = true if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil { - return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) + return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } if t2 := p.nextToken(); t2 != tLinebreak { return EntryInvalid, p.parseError("expected next entry after timestamp", t2) diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go index eca14595..fbb84a2b 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go @@ -52,8 +52,14 @@ type ProtobufParser struct { // fieldPos is the position within a Summary or (legacy) Histogram. -2 // is the count. -1 is the sum. Otherwise it is the index within // quantiles/buckets. - fieldPos int - fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. + fieldPos int + fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. + redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram. + + // exemplarReturned is set to true each time an exemplar has been + // returned, and set back to false upon each Next() call. + exemplarReturned bool + // state is marked by the entry we are processing. EntryInvalid implies // that we have to decode the next MetricFamily. state Entry @@ -62,17 +68,22 @@ mf *dto.MetricFamily + // Whether to also parse a classic histogram that is also present as a + // native histogram. + parseClassicHistograms bool + // The following are just shenanigans to satisfy the Parser interface. metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric. } // NewProtobufParser returns a parser for the payload in the byte slice.
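// (Aside: with parseClassicHistograms set, a native histogram that also carries
// classic buckets is parsed twice, once as a native histogram and then again as
// the classic bucket/sum/count series; see the redoClassic field above.)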
-func NewProtobufParser(b []byte) Parser { +func NewProtobufParser(b []byte, parseClassicHistograms bool) Parser { return &ProtobufParser{ - in: b, - state: EntryInvalid, - mf: &dto.MetricFamily{}, - metricBytes: &bytes.Buffer{}, + in: b, + state: EntryInvalid, + mf: &dto.MetricFamily{}, + metricBytes: &bytes.Buffer{}, + parseClassicHistograms: parseClassicHistograms, } } @@ -98,7 +109,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { v = float64(s.GetSampleCount()) case -1: v = s.GetSampleSum() - // Need to detect a summaries without quantile here. + // Need to detect summaries without quantile here. if len(s.GetQuantile()) == 0 { p.fieldsDone = true } @@ -106,19 +117,28 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { v = s.GetQuantile()[p.fieldPos].GetValue() } case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: - // This should only happen for a legacy histogram. + // This should only happen for a classic histogram. h := m.GetHistogram() switch p.fieldPos { case -2: - v = float64(h.GetSampleCount()) + v = h.GetSampleCountFloat() + if v == 0 { + v = float64(h.GetSampleCount()) + } case -1: v = h.GetSampleSum() default: bb := h.GetBucket() if p.fieldPos >= len(bb) { - v = float64(h.GetSampleCount()) + v = h.GetSampleCountFloat() + if v == 0 { + v = float64(h.GetSampleCount()) + } } else { - v = float64(bb[p.fieldPos].GetCumulativeCount()) + v = bb[p.fieldPos].GetCumulativeCountFloat() + if v == 0 { + v = float64(bb[p.fieldPos].GetCumulativeCount()) + } } } default: @@ -149,6 +169,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his ts = m.GetTimestampMs() h = m.GetHistogram() ) + if p.parseClassicHistograms && len(h.GetBucket()) > 0 { + p.redoClassic = true + } if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 { // It is a float histogram. fh := histogram.FloatHistogram{ @@ -274,8 +297,12 @@ func (p *ProtobufParser) Metric(l *labels.Labels) string { // Exemplar writes the exemplar of the current sample into the passed // exemplar. It returns if an exemplar exists or not. In case of a native // histogram, the legacy bucket section is still used for exemplars. To ingest -// all examplars, call the Exemplar method repeatedly until it returns false. +// all exemplars, call the Exemplar method repeatedly until it returns false. func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { + if p.exemplarReturned && p.state == EntrySeries { + // We only ever return one exemplar per (non-native-histogram) series. + return false + } m := p.mf.GetMetric()[p.metricPos] var exProto *dto.Exemplar switch p.mf.GetType() { @@ -316,6 +343,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { } p.builder.Sort() ex.Labels = p.builder.Labels() + p.exemplarReturned = true return true } @@ -323,6 +351,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { // text format parser). It returns (EntryInvalid, io.EOF) if no samples were // read. 
func (p *ProtobufParser) Next() (Entry, error) { + p.exemplarReturned = false switch p.state { case EntryInvalid: p.metricPos = 0 @@ -376,6 +405,12 @@ return EntryInvalid, err } case EntryHistogram, EntrySeries: + if p.redoClassic { + p.redoClassic = false + p.state = EntrySeries + p.fieldPos = -3 + p.fieldsDone = false + } t := p.mf.GetType() if p.state == EntrySeries && !p.fieldsDone && (t == dto.MetricType_SUMMARY || @@ -386,6 +421,14 @@ p.metricPos++ p.fieldPos = -2 p.fieldsDone = false + // If this is a metric family containing native + // histograms, we have to switch back to native + // histograms after parsing a classic histogram. + if p.state == EntrySeries && + (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && + isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) { + p.state = EntryHistogram + } } if p.metricPos >= len(p.mf.GetMetric()) { p.state = EntryInvalid @@ -432,7 +475,7 @@ func (p *ProtobufParser) updateMetricBytes() error { // state. func (p *ProtobufParser) getMagicName() string { t := p.mf.GetType() - if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_SUMMARY) { + if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_GAUGE_HISTOGRAM && t != dto.MetricType_SUMMARY) { return p.mf.GetName() } if p.fieldPos == -2 { @@ -521,18 +564,17 @@ func formatOpenMetricsFloat(f float64) string { return s + ".0" } -// isNativeHistogram returns false iff the provided histograms has no sparse -// buckets and a zero threshold of 0 and a zero count of 0. In principle, this -// could still be meant to be a native histogram (with a zero threshold of 0 and -// no observations yet), but for now, we'll treat this case as a conventional -// histogram. -// -// TODO(beorn7): In the final format, there should be an unambiguous way of -// deciding if a histogram should be ingested as a conventional one or a native -// one. +// isNativeHistogram returns false iff the provided histogram has no spans at +// all (neither positive nor negative) and a zero threshold of 0 and a zero +// count of 0. In principle, this could still be meant to be a native histogram +// with a zero threshold of 0 and no observations yet. In that case, +// instrumentation libraries should add a "no-op" span (e.g. length zero, offset +// zero) to signal that the histogram is meant to be parsed as a native +// histogram. Failing to do so will cause Prometheus to parse it as a classic +// histogram as long as no observations have happened.
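+// For example, a Histogram protobuf whose only native field is a single
+// PositiveSpan with offset 0 and length 0 is still detected as native here,
+// because len(h.GetPositiveSpan()) > 0 already holds.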
func isNativeHistogram(h *dto.Histogram) bool { - return len(h.GetNegativeDelta()) > 0 || - len(h.GetPositiveDelta()) > 0 || - h.GetZeroCount() > 0 || - h.GetZeroThreshold() > 0 + return len(h.GetPositiveSpan()) > 0 || + len(h.GetNegativeSpan()) > 0 || + h.GetZeroThreshold() > 0 || + h.GetZeroCount() > 0 } diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index 79697d07..af557993 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -32,6 +32,7 @@ import ( "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/sigv4" "github.com/prometheus/common/version" "go.uber.org/atomic" @@ -349,19 +350,6 @@ func (n *Manager) Send(alerts ...*Alert) { n.mtx.Lock() defer n.mtx.Unlock() - // Attach external labels before relabelling and sending. - for _, a := range alerts { - lb := labels.NewBuilder(a.Labels) - - n.opts.ExternalLabels.Range(func(l labels.Label) { - if a.Labels.Get(l.Name) == "" { - lb.Set(l.Name, l.Value) - } - }) - - a.Labels = lb.Labels(a.Labels) - } - alerts = n.relabelAlerts(alerts) if len(alerts) == 0 { return @@ -390,15 +378,25 @@ func (n *Manager) Send(alerts ...*Alert) { n.setMore() } +// Attach external labels and process relabelling rules. func (n *Manager) relabelAlerts(alerts []*Alert) []*Alert { + lb := labels.NewBuilder(labels.EmptyLabels()) var relabeledAlerts []*Alert - for _, alert := range alerts { - labels, keep := relabel.Process(alert.Labels, n.opts.RelabelConfigs...) - if keep { - alert.Labels = labels - relabeledAlerts = append(relabeledAlerts, alert) + for _, a := range alerts { + lb.Reset(a.Labels) + n.opts.ExternalLabels.Range(func(l labels.Label) { + if a.Labels.Get(l.Name) == "" { + lb.Set(l.Name, l.Value) + } + }) + + keep := relabel.ProcessBuilder(lb, n.opts.RelabelConfigs...) + if !keep { + continue } + a.Labels = lb.Labels() + relabeledAlerts = append(relabeledAlerts, a) } return relabeledAlerts } @@ -643,6 +641,17 @@ func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metri if err != nil { return nil, err } + t := client.Transport + + if cfg.SigV4Config != nil { + t, err = sigv4.NewSigV4RoundTripper(cfg.SigV4Config, client.Transport) + if err != nil { + return nil, err + } + } + + client.Transport = t + s := &alertmanagerSet{ client: client, cfg: cfg, @@ -701,36 +710,38 @@ func postPath(pre string, v config.AlertmanagerAPIVersion) string { func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) { var res []alertmanager var droppedAlertManagers []alertmanager + lb := labels.NewBuilder(labels.EmptyLabels()) for _, tlset := range tg.Targets { - lbls := make([]labels.Label, 0, len(tlset)+2+len(tg.Labels)) + lb.Reset(labels.EmptyLabels()) for ln, lv := range tlset { - lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + lb.Set(string(ln), string(lv)) } // Set configured scheme as the initial scheme label for overwrite. - lbls = append(lbls, labels.Label{Name: model.SchemeLabel, Value: cfg.Scheme}) - lbls = append(lbls, labels.Label{Name: pathLabel, Value: postPath(cfg.PathPrefix, cfg.APIVersion)}) + lb.Set(model.SchemeLabel, cfg.Scheme) + lb.Set(pathLabel, postPath(cfg.PathPrefix, cfg.APIVersion)) // Combine target labels with target group labels. 
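// Target-level labels take precedence: a group label is only applied when the
// target's own label set does not already define that name.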
for ln, lv := range tg.Labels { if _, ok := tlset[ln]; !ok { - lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + lb.Set(string(ln), string(lv)) } } - lset, keep := relabel.Process(labels.New(lbls...), cfg.RelabelConfigs...) + preRelabel := lb.Labels() + keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) if !keep { - droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{labels.New(lbls...)}) + droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{preRelabel}) continue } - addr := lset.Get(model.AddressLabel) + addr := lb.Get(model.AddressLabel) if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { return nil, nil, err } - res = append(res, alertmanagerLabels{lset}) + res = append(res, alertmanagerLabels{lb.Labels()}) } return res, droppedAlertManagers, nil } diff --git a/vendor/github.com/prometheus/prometheus/prompb/README.md b/vendor/github.com/prometheus/prometheus/prompb/README.md index 8c19b17e..a33d7bfb 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/README.md +++ b/vendor/github.com/prometheus/prometheus/prompb/README.md @@ -4,6 +4,6 @@ re-compile them when building Prometheus. If however you have modified the defs and do need to re-compile, run `make proto` from the parent dir. -In order for the script to run, you'll need `protoc` (version 3.12.3) in your -PATH. +In order for the [script](../scripts/genproto.sh) to run, you'll need `protoc` (version 3.15.8) in +your PATH. diff --git a/vendor/github.com/prometheus/prometheus/prompb/custom.go b/vendor/github.com/prometheus/prometheus/prompb/custom.go index 4b07187b..13d6e0f0 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/custom.go +++ b/vendor/github.com/prometheus/prometheus/prompb/custom.go @@ -20,6 +20,11 @@ import ( func (m Sample) T() int64 { return m.Timestamp } func (m Sample) V() float64 { return m.Value } +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} + func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { size := r.Size() data, ok := p.Get().(*[]byte) diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go index 3e4bc7df..05d25747 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go @@ -172,11 +172,12 @@ func (m *Gauge) GetValue() float64 { } type Counter struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + CreatedTimestamp *types.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Counter) Reset() { *m = Counter{} } @@ -226,6 +227,13 @@ func (m *Counter) GetExemplar() *Exemplar { return nil } +func (m *Counter) GetCreatedTimestamp() *types.Timestamp { + if m != nil { + return 
m.CreatedTimestamp + } + return nil +} + type Quantile struct { Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"` Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` @@ -282,12 +290,13 @@ func (m *Quantile) GetValue() float64 { } type Summary struct { - SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` - SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` - Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` + SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` + Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"` + CreatedTimestamp *types.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Summary) Reset() { *m = Summary{} } @@ -344,6 +353,13 @@ func (m *Summary) GetQuantile() []Quantile { return nil } +func (m *Summary) GetCreatedTimestamp() *types.Timestamp { + if m != nil { + return m.CreatedTimestamp + } + return nil +} + type Untyped struct { Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -396,7 +412,8 @@ type Histogram struct { SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"` SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` // Buckets for the conventional histogram. - Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"` + Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"` + CreatedTimestamp *types.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. @@ -414,6 +431,9 @@ type Histogram struct { NegativeDelta []int64 `protobuf:"zigzag64,10,rep,packed,name=negative_delta,json=negativeDelta,proto3" json:"negative_delta,omitempty"` NegativeCount []float64 `protobuf:"fixed64,11,rep,packed,name=negative_count,json=negativeCount,proto3" json:"negative_count,omitempty"` // Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. Otherwise, it would be + // indistinguishable from a classic histogram. 
PositiveSpan []BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan,proto3" json:"positive_span"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float @@ -486,6 +506,13 @@ func (m *Histogram) GetBucket() []Bucket { return nil } +func (m *Histogram) GetCreatedTimestamp() *types.Timestamp { + if m != nil { + return m.CreatedTimestamp + } + return nil +} + func (m *Histogram) GetSchema() int32 { if m != nil { return m.Schema @@ -938,65 +965,68 @@ func init() { } var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 923 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x8e, 0xdb, 0x44, - 0x18, 0xad, 0x1b, 0xe7, 0xc7, 0x5f, 0x36, 0xdb, 0x74, 0x88, 0x2a, 0x6b, 0x61, 0x37, 0xc1, 0x12, - 0xd2, 0x82, 0x50, 0x22, 0xa0, 0x08, 0x54, 0x40, 0x62, 0xb7, 0xdd, 0x6e, 0x51, 0x49, 0x5b, 0x26, - 0xc9, 0x45, 0xe1, 0xc2, 0x9a, 0x64, 0x67, 0x1d, 0x0b, 0xdb, 0x63, 0xec, 0x71, 0xc5, 0x72, 0xcf, - 0x25, 0xd7, 0xbc, 0x02, 0x4f, 0x82, 0x7a, 0xc9, 0x13, 0x20, 0xb4, 0xef, 0xc0, 0x3d, 0x9a, 0x3f, - 0x3b, 0x5b, 0x39, 0x85, 0x15, 0x77, 0x33, 0xc7, 0xe7, 0xfb, 0xe6, 0x9c, 0x99, 0xc9, 0x99, 0x80, - 0x17, 0xb2, 0x49, 0x9a, 0xb1, 0x98, 0xf2, 0x35, 0x2d, 0xf2, 0xc9, 0x2a, 0x0a, 0x69, 0xc2, 0x27, - 0x31, 0xe5, 0x59, 0xb8, 0xca, 0xc7, 0x69, 0xc6, 0x38, 0x43, 0x83, 0x90, 0x8d, 0x2b, 0xce, 0x58, - 0x71, 0xf6, 0x06, 0x01, 0x0b, 0x98, 0x24, 0x4c, 0xc4, 0x48, 0x71, 0xf7, 0x86, 0x01, 0x63, 0x41, - 0x44, 0x27, 0x72, 0xb6, 0x2c, 0xce, 0x27, 0x3c, 0x8c, 0x69, 0xce, 0x49, 0x9c, 0x2a, 0x82, 0xf7, - 0x31, 0x38, 0x5f, 0x93, 0x25, 0x8d, 0x9e, 0x91, 0x30, 0x43, 0x08, 0xec, 0x84, 0xc4, 0xd4, 0xb5, - 0x46, 0xd6, 0xa1, 0x83, 0xe5, 0x18, 0x0d, 0xa0, 0xf9, 0x82, 0x44, 0x05, 0x75, 0x6f, 0x4a, 0x50, - 0x4d, 0xbc, 0x7d, 0x68, 0x9e, 0x92, 0x22, 0xd8, 0xf8, 0x2c, 0x6a, 0x2c, 0xf3, 0xf9, 0x3b, 0x68, - 0xdf, 0x67, 0x45, 0xc2, 0x69, 0x56, 0x4f, 0x40, 0xf7, 0xa0, 0x43, 0x7f, 0xa4, 0x71, 0x1a, 0x91, - 0x4c, 0x36, 0xee, 0x7e, 0x78, 0x30, 0xae, 0xb3, 0x35, 0x3e, 0xd1, 0x2c, 0x5c, 0xf2, 0xbd, 0xcf, - 0xa1, 0xf3, 0x4d, 0x41, 0x12, 0x1e, 0x46, 0x14, 0xed, 0x41, 0xe7, 0x07, 0x3d, 0xd6, 0x0b, 0x94, - 0xf3, 0xab, 0xca, 0x4b, 0x69, 0xbf, 0x58, 0xd0, 0x9e, 0x15, 0x71, 0x4c, 0xb2, 0x0b, 0xf4, 0x36, - 0xec, 0xe4, 0x24, 0x4e, 0x23, 0xea, 0xaf, 0x84, 0x5a, 0xd9, 0xc1, 0xc6, 0x5d, 0x85, 0x49, 0x03, - 0x68, 0x1f, 0x40, 0x53, 0xf2, 0x22, 0xd6, 0x9d, 0x1c, 0x85, 0xcc, 0x8a, 0x18, 0x7d, 0xb9, 0xb1, - 0x7e, 0x63, 0xd4, 0xd8, 0xee, 0xc3, 0x28, 0x3e, 0xb6, 0x5f, 0xfe, 0x39, 0xbc, 0x51, 0xa9, 0xf4, - 0x86, 0xd0, 0x5e, 0x24, 0xfc, 0x22, 0xa5, 0x67, 0x5b, 0xf6, 0xf2, 0x6f, 0x1b, 0x9c, 0x47, 0x61, - 0xce, 0x59, 0x90, 0x91, 0xf8, 0xbf, 0x48, 0x7e, 0x1f, 0xd0, 0x26, 0xc5, 0x3f, 0x8f, 0x18, 0xe1, - 0xae, 0x2d, 0x7b, 0xf6, 0x37, 0x88, 0x0f, 0x05, 0xfe, 0x6f, 0x06, 0xef, 0x41, 0x6b, 0x59, 0xac, - 0xbe, 0xa7, 0x5c, 0xdb, 0x7b, 0xab, 0xde, 0xde, 0xb1, 0xe4, 0x68, 0x73, 0xba, 0x02, 0xdd, 0x81, - 0x56, 0xbe, 0x5a, 0xd3, 0x98, 0xb8, 0xcd, 0x91, 0x75, 0x78, 0x1b, 0xeb, 0x19, 0x7a, 0x07, 0x76, - 0x7f, 0xa2, 0x19, 0xf3, 0xf9, 0x3a, 0xa3, 0xf9, 0x9a, 0x45, 0x67, 0x6e, 0x4b, 0x2e, 0xdb, 0x13, - 0xe8, 0xdc, 0x80, 0x42, 0x99, 0xa4, 0x29, 0xa3, 0x6d, 0x69, 0xd4, 0x11, 0x88, 0xb2, 0x79, 0x08, - 0xfd, 0xea, 0xb3, 0x36, 0xd9, 0x91, 0x7d, 0x76, 0x4b, 0x92, 0xb2, 0xf8, 0x18, 0x7a, 0x09, 0x0d, - 0x08, 0x0f, 0x5f, 0x50, 0x3f, 0x4f, 0x49, 0xe2, 0x3a, 0xd2, 0xca, 0xe8, 0x75, 0x56, 0x66, 0x29, - 0x49, 0xb4, 0x9d, 0x1d, 0x53, 0x2c, 0x30, 0x21, 0xbe, 
0x6c, 0x76, 0x46, 0x23, 0x4e, 0x5c, 0x18, - 0x35, 0x0e, 0x11, 0x2e, 0x97, 0x78, 0x20, 0xc0, 0x2b, 0x34, 0x65, 0xa0, 0x3b, 0x6a, 0x08, 0x8f, - 0x06, 0x55, 0x26, 0x1e, 0x43, 0x2f, 0x65, 0x79, 0x58, 0x49, 0xdb, 0xb9, 0x9e, 0x34, 0x53, 0x6c, - 0xa4, 0x95, 0xcd, 0x94, 0xb4, 0x9e, 0x92, 0x66, 0xd0, 0x52, 0x5a, 0x49, 0x53, 0xd2, 0x76, 0x95, - 0x34, 0x83, 0x4a, 0x69, 0xde, 0xef, 0x16, 0xb4, 0xd4, 0x82, 0xe8, 0x5d, 0xe8, 0xaf, 0x8a, 0xb8, - 0x88, 0x36, 0xed, 0xa8, 0x8b, 0x77, 0xab, 0xc2, 0x95, 0xa1, 0xbb, 0x70, 0xe7, 0x55, 0xea, 0x95, - 0x0b, 0x38, 0x78, 0xa5, 0x40, 0x9d, 0xd0, 0x10, 0xba, 0x45, 0x9a, 0xd2, 0xcc, 0x5f, 0xb2, 0x22, - 0x39, 0xd3, 0xb7, 0x10, 0x24, 0x74, 0x2c, 0x90, 0x2b, 0x79, 0xd1, 0xb8, 0x76, 0x5e, 0x40, 0xb5, - 0x71, 0xe2, 0x52, 0xb2, 0xf3, 0xf3, 0x9c, 0x2a, 0x07, 0xb7, 0xb1, 0x9e, 0x09, 0x3c, 0xa2, 0x49, - 0xc0, 0xd7, 0x72, 0xf5, 0x1e, 0xd6, 0x33, 0xef, 0x57, 0x0b, 0x3a, 0xa6, 0x29, 0xfa, 0x0c, 0x9a, - 0x91, 0x48, 0x4b, 0xd7, 0x92, 0xc7, 0x34, 0xac, 0xd7, 0x50, 0x06, 0xaa, 0x3e, 0x25, 0x55, 0x53, - 0x9f, 0x47, 0xe8, 0x53, 0x70, 0xca, 0x4c, 0xd6, 0xd6, 0xf6, 0xc6, 0x2a, 0xb5, 0xc7, 0x26, 0xb5, - 0xc7, 0x73, 0xc3, 0xc0, 0x15, 0xd9, 0xfb, 0xb9, 0x01, 0xad, 0xa9, 0x7c, 0x19, 0xfe, 0x9f, 0xae, - 0x0f, 0xa0, 0x19, 0x88, 0x2c, 0xd7, 0x41, 0xfc, 0x66, 0x7d, 0xb1, 0x8c, 0x7b, 0xac, 0x98, 0xe8, - 0x13, 0x68, 0xaf, 0x54, 0xbe, 0x6b, 0xc9, 0xfb, 0xf5, 0x45, 0xfa, 0x11, 0xc0, 0x86, 0x2d, 0x0a, - 0x73, 0x15, 0xbe, 0xf2, 0x3e, 0x6c, 0x2d, 0xd4, 0x09, 0x8d, 0x0d, 0x5b, 0x14, 0x16, 0x2a, 0x26, - 0x65, 0x98, 0x6c, 0x2d, 0xd4, 0x59, 0x8a, 0x0d, 0x1b, 0x7d, 0x01, 0xce, 0xda, 0xa4, 0xa7, 0x0c, - 0x91, 0xad, 0xdb, 0x53, 0x86, 0x2c, 0xae, 0x2a, 0x44, 0xde, 0x96, 0x3b, 0xee, 0xc7, 0xb9, 0x4c, - 0xaa, 0x06, 0xee, 0x96, 0xd8, 0x34, 0xf7, 0x7e, 0xb3, 0x60, 0x47, 0x9d, 0xc3, 0x43, 0x12, 0x87, - 0xd1, 0x45, 0xed, 0x33, 0x8a, 0xc0, 0x5e, 0xd3, 0x28, 0xd5, 0xaf, 0xa8, 0x1c, 0xa3, 0xbb, 0x60, - 0x0b, 0x8d, 0x72, 0x0b, 0x77, 0xb7, 0xfd, 0xe6, 0x55, 0xe7, 0xf9, 0x45, 0x4a, 0xb1, 0x64, 0x8b, - 0x44, 0x56, 0xff, 0x07, 0x5c, 0xfb, 0x75, 0x89, 0xac, 0xea, 0x4c, 0x22, 0xab, 0x8a, 0xf7, 0x96, - 0x00, 0x55, 0x3f, 0xd4, 0x85, 0xf6, 0xfd, 0xa7, 0x8b, 0x27, 0xf3, 0x13, 0xdc, 0xbf, 0x81, 0x1c, - 0x68, 0x9e, 0x1e, 0x2d, 0x4e, 0x4f, 0xfa, 0x96, 0xc0, 0x67, 0x8b, 0xe9, 0xf4, 0x08, 0x3f, 0xef, - 0xdf, 0x14, 0x93, 0xc5, 0x93, 0xf9, 0xf3, 0x67, 0x27, 0x0f, 0xfa, 0x0d, 0xd4, 0x03, 0xe7, 0xd1, - 0x57, 0xb3, 0xf9, 0xd3, 0x53, 0x7c, 0x34, 0xed, 0xdb, 0xe8, 0x0d, 0xb8, 0x25, 0x6b, 0xfc, 0x0a, - 0x6c, 0x1e, 0x7b, 0x2f, 0x2f, 0x0f, 0xac, 0x3f, 0x2e, 0x0f, 0xac, 0xbf, 0x2e, 0x0f, 0xac, 0x6f, - 0x07, 0x21, 0xf3, 0x2b, 0x71, 0xbe, 0x12, 0xb7, 0x6c, 0xc9, 0x9b, 0xfd, 0xd1, 0x3f, 0x01, 0x00, - 0x00, 0xff, 0xff, 0x52, 0x2d, 0xb5, 0x31, 0xef, 0x08, 0x00, 0x00, + // 963 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0x1b, 0x45, + 0x14, 0xee, 0x76, 0xfd, 0x93, 0x3d, 0x8e, 0x93, 0xcd, 0x60, 0x55, 0xab, 0x40, 0x62, 0xb3, 0x12, + 0x52, 0x40, 0xc8, 0x16, 0x50, 0x04, 0x2a, 0x45, 0x22, 0x69, 0xd3, 0x14, 0x15, 0xb7, 0x65, 0x6c, + 0x5f, 0x94, 0x9b, 0xd5, 0xd8, 0x9e, 0xac, 0x57, 0xec, 0xee, 0x2c, 0xfb, 0x53, 0x11, 0xee, 0x79, + 0x06, 0x5e, 0x01, 0xf1, 0x1c, 0x08, 0xf5, 0x92, 0x07, 0x40, 0x08, 0xe5, 0x49, 0xd0, 0xfc, 0xed, + 0x3a, 0xd5, 0xba, 0x90, 0xf6, 0x6e, 0xe6, 0xf3, 0x77, 0xce, 0x7c, 0xe7, 0x9b, 0xf1, 0x39, 0x0b, + 0x6e, 0xc0, 0x46, 0x49, 0xca, 0x22, 0x9a, 0xaf, 0x68, 0x91, 0x8d, 0x16, 0x61, 0x40, 0xe3, 0x7c, + 0x14, 0xd1, 0x3c, 0x0d, 0x16, 0xd9, 0x30, 0x49, 0x59, 0xce, 
0x50, 0x2f, 0x60, 0xc3, 0x8a, 0x33, + 0x94, 0x9c, 0xfd, 0x9e, 0xcf, 0x7c, 0x26, 0x08, 0x23, 0xbe, 0x92, 0xdc, 0xfd, 0xbe, 0xcf, 0x98, + 0x1f, 0xd2, 0x91, 0xd8, 0xcd, 0x8b, 0xf3, 0x51, 0x1e, 0x44, 0x34, 0xcb, 0x49, 0x94, 0x48, 0x82, + 0xfb, 0x29, 0x58, 0xdf, 0x90, 0x39, 0x0d, 0x9f, 0x92, 0x20, 0x45, 0x08, 0x1a, 0x31, 0x89, 0xa8, + 0x63, 0x0c, 0x8c, 0x23, 0x0b, 0x8b, 0x35, 0xea, 0x41, 0xf3, 0x39, 0x09, 0x0b, 0xea, 0xdc, 0x14, + 0xa0, 0xdc, 0xb8, 0x07, 0xd0, 0x3c, 0x23, 0x85, 0xbf, 0xf6, 0x33, 0x8f, 0x31, 0xf4, 0xcf, 0xbf, + 0x19, 0xd0, 0xbe, 0xc7, 0x8a, 0x38, 0xa7, 0x69, 0x3d, 0x03, 0xdd, 0x81, 0x2d, 0xfa, 0x23, 0x8d, + 0x92, 0x90, 0xa4, 0x22, 0x73, 0xe7, 0xe3, 0xc3, 0x61, 0x5d, 0x5d, 0xc3, 0x53, 0xc5, 0xc2, 0x25, + 0x1f, 0x8d, 0x61, 0x6f, 0x91, 0x52, 0x92, 0xd3, 0xa5, 0x57, 0x96, 0xe3, 0x98, 0x22, 0xc9, 0xfe, + 0x50, 0x16, 0x3c, 0xd4, 0x05, 0x0f, 0xa7, 0x9a, 0x71, 0xd2, 0x78, 0xf1, 0x77, 0xdf, 0xc0, 0xb6, + 0x0a, 0x2d, 0x71, 0xf7, 0x2e, 0x6c, 0x7d, 0x5b, 0x90, 0x38, 0x0f, 0x42, 0x8a, 0xf6, 0x61, 0xeb, + 0x07, 0xb5, 0x56, 0x7a, 0xcb, 0xfd, 0x55, 0x27, 0xca, 0x52, 0xff, 0x32, 0xa0, 0x3d, 0x29, 0xa2, + 0x88, 0xa4, 0x17, 0xe8, 0x5d, 0xd8, 0xce, 0x48, 0x94, 0x84, 0xd4, 0x5b, 0xf0, 0xe2, 0x45, 0x86, + 0x06, 0xee, 0x48, 0x4c, 0xf8, 0x81, 0x0e, 0x00, 0x14, 0x25, 0x2b, 0x22, 0x95, 0xc9, 0x92, 0xc8, + 0xa4, 0x88, 0xd0, 0x57, 0x6b, 0xe7, 0x9b, 0x03, 0x73, 0xb3, 0x2d, 0x5a, 0xb1, 0xa8, 0xea, 0xc6, + 0x9a, 0xca, 0x5a, 0x73, 0x1a, 0xaf, 0x6d, 0x4e, 0x1f, 0xda, 0xb3, 0x38, 0xbf, 0x48, 0xe8, 0x72, + 0xc3, 0x55, 0xff, 0xde, 0x04, 0xeb, 0x61, 0x90, 0xe5, 0xcc, 0x4f, 0x49, 0xf4, 0x7f, 0x1c, 0xf8, + 0x10, 0xd0, 0x3a, 0xc5, 0x3b, 0x0f, 0x19, 0xc9, 0x85, 0x42, 0x03, 0xdb, 0x6b, 0xc4, 0x07, 0x1c, + 0xff, 0x2f, 0xbf, 0xee, 0x40, 0x6b, 0x5e, 0x2c, 0xbe, 0xa7, 0xb9, 0x72, 0xeb, 0x9d, 0x7a, 0xb7, + 0x4e, 0x04, 0x47, 0x79, 0xa5, 0x22, 0xea, 0x9d, 0xda, 0x7d, 0x5d, 0xa7, 0xd0, 0x2d, 0x68, 0x65, + 0x8b, 0x15, 0x8d, 0x88, 0xd3, 0x1c, 0x18, 0x47, 0x7b, 0x58, 0xed, 0xd0, 0x7b, 0xb0, 0xf3, 0x13, + 0x4d, 0x99, 0x97, 0xaf, 0x52, 0x9a, 0xad, 0x58, 0xb8, 0x74, 0x5a, 0xa2, 0x8a, 0x2e, 0x47, 0xa7, + 0x1a, 0xe4, 0x85, 0x0a, 0x9a, 0xf4, 0xad, 0x2d, 0x7c, 0xb3, 0x38, 0x22, 0x5d, 0x3b, 0x02, 0xbb, + 0xfa, 0x59, 0x79, 0xb6, 0x25, 0xf2, 0xec, 0x94, 0x24, 0xe9, 0xd8, 0x23, 0xe8, 0xc6, 0xd4, 0x27, + 0x79, 0xf0, 0x9c, 0x7a, 0x59, 0x42, 0x62, 0xc7, 0x12, 0xce, 0x0c, 0x5e, 0xe5, 0xcc, 0x24, 0x21, + 0xb1, 0x72, 0x67, 0x5b, 0x07, 0x73, 0x8c, 0x8b, 0x2f, 0x93, 0x2d, 0x69, 0x98, 0x13, 0x07, 0x06, + 0xe6, 0x11, 0xc2, 0xe5, 0x11, 0xf7, 0x39, 0x78, 0x85, 0x26, 0x0b, 0xe8, 0x0c, 0x4c, 0x5e, 0xa3, + 0x46, 0x65, 0x11, 0x8f, 0xa0, 0x9b, 0xb0, 0x2c, 0xa8, 0xa4, 0x6d, 0x5f, 0x4f, 0x9a, 0x0e, 0xd6, + 0xd2, 0xca, 0x64, 0x52, 0x5a, 0x57, 0x4a, 0xd3, 0x68, 0x29, 0xad, 0xa4, 0x49, 0x69, 0x3b, 0x52, + 0x9a, 0x46, 0x85, 0x34, 0xf7, 0x0f, 0x03, 0x5a, 0xf2, 0x40, 0xf4, 0x3e, 0xd8, 0x8b, 0x22, 0x2a, + 0xc2, 0xf5, 0x72, 0xe4, 0x3b, 0xde, 0xad, 0x70, 0x59, 0xd0, 0x6d, 0xb8, 0xf5, 0x32, 0xf5, 0xca, + 0x7b, 0xee, 0xbd, 0x14, 0x20, 0x6f, 0xa8, 0x0f, 0x9d, 0x22, 0x49, 0x68, 0xea, 0xcd, 0x59, 0x11, + 0x2f, 0xd5, 0xa3, 0x06, 0x01, 0x9d, 0x70, 0xe4, 0x4a, 0x73, 0x34, 0xaf, 0xd7, 0x1c, 0xdd, 0xbb, + 0x00, 0x95, 0x71, 0xfc, 0x51, 0xb2, 0xf3, 0xf3, 0x8c, 0xca, 0x0a, 0xf6, 0xb0, 0xda, 0x71, 0x3c, + 0xa4, 0xb1, 0x9f, 0xaf, 0xc4, 0xe9, 0x5d, 0xac, 0x76, 0xee, 0x2f, 0x06, 0x6c, 0xe9, 0xa4, 0xe8, + 0x0b, 0x68, 0x86, 0x7c, 0x36, 0x38, 0x86, 0xb8, 0xa6, 0x7e, 0xbd, 0x86, 0x72, 0x7c, 0xa8, 0x5b, + 0x92, 0x31, 0xf5, 0xdd, 0x12, 0x7d, 0x0e, 0xd6, 0x35, 0x5a, 0x36, 0xae, 0xc8, 0xee, 
0xcf, 0x26, + 0xb4, 0xc6, 0x62, 0x0e, 0xbe, 0x99, 0xae, 0x8f, 0xa0, 0xe9, 0xf3, 0xc9, 0xa5, 0xa6, 0xce, 0xdb, + 0xf5, 0xc1, 0x62, 0xb8, 0x61, 0xc9, 0x44, 0x9f, 0x41, 0x7b, 0x21, 0x87, 0x99, 0x92, 0x7c, 0x50, + 0x1f, 0xa4, 0x26, 0x1e, 0xd6, 0x6c, 0x1e, 0x98, 0xc9, 0xd1, 0xa0, 0x3a, 0xf0, 0x86, 0x40, 0x35, + 0x3f, 0xb0, 0x66, 0xf3, 0xc0, 0x42, 0x76, 0x5d, 0xd1, 0x4c, 0x36, 0x06, 0xaa, 0xd6, 0x8c, 0x35, + 0x1b, 0x7d, 0x09, 0xd6, 0x4a, 0x37, 0x63, 0xd1, 0x44, 0x36, 0xda, 0x53, 0xf6, 0x6c, 0x5c, 0x45, + 0xf0, 0xf6, 0x5d, 0x3a, 0xee, 0x45, 0x99, 0xe8, 0x54, 0x26, 0xee, 0x94, 0xd8, 0x38, 0x73, 0x7f, + 0x35, 0x60, 0x5b, 0xde, 0xc3, 0x03, 0x12, 0x05, 0xe1, 0x45, 0xed, 0x47, 0x03, 0x82, 0xc6, 0x8a, + 0x86, 0x89, 0xfa, 0x66, 0x10, 0x6b, 0x74, 0x1b, 0x1a, 0x5c, 0xa3, 0xb0, 0x70, 0x67, 0xd3, 0x7f, + 0x5e, 0x66, 0x9e, 0x5e, 0x24, 0x14, 0x0b, 0x36, 0x6f, 0xf0, 0xf2, 0xeb, 0xc7, 0x69, 0xbc, 0xaa, + 0xc1, 0xcb, 0x38, 0xdd, 0xe0, 0x65, 0xc4, 0x07, 0x73, 0x80, 0x2a, 0x1f, 0xea, 0x40, 0xfb, 0xde, + 0x93, 0xd9, 0xe3, 0xe9, 0x29, 0xb6, 0x6f, 0x20, 0x0b, 0x9a, 0x67, 0xc7, 0xb3, 0xb3, 0x53, 0xdb, + 0xe0, 0xf8, 0x64, 0x36, 0x1e, 0x1f, 0xe3, 0x67, 0xf6, 0x4d, 0xbe, 0x99, 0x3d, 0x9e, 0x3e, 0x7b, + 0x7a, 0x7a, 0xdf, 0x36, 0x51, 0x17, 0xac, 0x87, 0x5f, 0x4f, 0xa6, 0x4f, 0xce, 0xf0, 0xf1, 0xd8, + 0x6e, 0xa0, 0xb7, 0x60, 0x57, 0xc4, 0x78, 0x15, 0xd8, 0x3c, 0x71, 0x5f, 0x5c, 0x1e, 0x1a, 0x7f, + 0x5e, 0x1e, 0x1a, 0xff, 0x5c, 0x1e, 0x1a, 0xdf, 0xf5, 0x02, 0xe6, 0x55, 0xe2, 0x3c, 0x29, 0x6e, + 0xde, 0x12, 0x2f, 0xfb, 0x93, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x68, 0x3f, 0xd9, 0x07, 0xdd, + 0x09, 0x00, 0x00, } func (m *LabelPair) Marshal() (dAtA []byte, err error) { @@ -1097,6 +1127,18 @@ func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.CreatedTimestamp != nil { + { + size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } if m.Exemplar != nil { { size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i]) @@ -1181,6 +1223,18 @@ func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.CreatedTimestamp != nil { + { + size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } if len(m.Quantile) > 0 { for iNdEx := len(m.Quantile) - 1; iNdEx >= 0; iNdEx-- { { @@ -1266,32 +1320,44 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.CreatedTimestamp != nil { + { + size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } if len(m.PositiveCount) > 0 { for iNdEx := len(m.PositiveCount) - 1; iNdEx >= 0; iNdEx-- { - f2 := math.Float64bits(float64(m.PositiveCount[iNdEx])) + f5 := math.Float64bits(float64(m.PositiveCount[iNdEx])) i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f2)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) } i = encodeVarintMetrics(dAtA, i, uint64(len(m.PositiveCount)*8)) i-- dAtA[i] = 0x72 } if len(m.PositiveDelta) > 0 { - var j3 int - dAtA5 := make([]byte, len(m.PositiveDelta)*10) + var j6 int + dAtA8 := make([]byte, 
len(m.PositiveDelta)*10) for _, num := range m.PositiveDelta { - x4 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x4 >= 1<<7 { - dAtA5[j3] = uint8(uint64(x4)&0x7f | 0x80) - j3++ - x4 >>= 7 - } - dAtA5[j3] = uint8(x4) - j3++ - } - i -= j3 - copy(dAtA[i:], dAtA5[:j3]) - i = encodeVarintMetrics(dAtA, i, uint64(j3)) + x7 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x7 >= 1<<7 { + dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) + j6++ + x7 >>= 7 + } + dAtA8[j6] = uint8(x7) + j6++ + } + i -= j6 + copy(dAtA[i:], dAtA8[:j6]) + i = encodeVarintMetrics(dAtA, i, uint64(j6)) i-- dAtA[i] = 0x6a } @@ -1311,30 +1377,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { } if len(m.NegativeCount) > 0 { for iNdEx := len(m.NegativeCount) - 1; iNdEx >= 0; iNdEx-- { - f6 := math.Float64bits(float64(m.NegativeCount[iNdEx])) + f9 := math.Float64bits(float64(m.NegativeCount[iNdEx])) i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f9)) } i = encodeVarintMetrics(dAtA, i, uint64(len(m.NegativeCount)*8)) i-- dAtA[i] = 0x5a } if len(m.NegativeDelta) > 0 { - var j7 int - dAtA9 := make([]byte, len(m.NegativeDelta)*10) + var j10 int + dAtA12 := make([]byte, len(m.NegativeDelta)*10) for _, num := range m.NegativeDelta { - x8 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x8 >= 1<<7 { - dAtA9[j7] = uint8(uint64(x8)&0x7f | 0x80) - j7++ - x8 >>= 7 - } - dAtA9[j7] = uint8(x8) - j7++ - } - i -= j7 - copy(dAtA[i:], dAtA9[:j7]) - i = encodeVarintMetrics(dAtA, i, uint64(j7)) + x11 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x11 >= 1<<7 { + dAtA12[j10] = uint8(uint64(x11)&0x7f | 0x80) + j10++ + x11 >>= 7 + } + dAtA12[j10] = uint8(x11) + j10++ + } + i -= j10 + copy(dAtA[i:], dAtA12[:j10]) + i = encodeVarintMetrics(dAtA, i, uint64(j10)) i-- dAtA[i] = 0x52 } @@ -1785,6 +1851,10 @@ func (m *Counter) Size() (n int) { l = m.Exemplar.Size() n += 1 + l + sovMetrics(uint64(l)) } + if m.CreatedTimestamp != nil { + l = m.CreatedTimestamp.Size() + n += 1 + l + sovMetrics(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1827,6 +1897,10 @@ func (m *Summary) Size() (n int) { n += 1 + l + sovMetrics(uint64(l)) } } + if m.CreatedTimestamp != nil { + l = m.CreatedTimestamp.Size() + n += 1 + l + sovMetrics(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1913,6 +1987,10 @@ func (m *Histogram) Size() (n int) { if len(m.PositiveCount) > 0 { n += 1 + sovMetrics(uint64(len(m.PositiveCount)*8)) + len(m.PositiveCount)*8 } + if m.CreatedTimestamp != nil { + l = m.CreatedTimestamp.Size() + n += 1 + l + sovMetrics(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -2316,6 +2394,42 @@ func (m *Counter) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedTimestamp == nil { + m.CreatedTimestamp = &types.Timestamp{} + } + if err := 
m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) @@ -2504,6 +2618,42 @@ func (m *Summary) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedTimestamp == nil { + m.CreatedTimestamp = &types.Timestamp{} + } + if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) @@ -3086,6 +3236,42 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { } else { return fmt.Errorf("proto: wrong wireType = %d for field PositiveCount", wireType) } + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedTimestamp == nil { + m.CreatedTimestamp = &types.Timestamp{} + } + if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto index 6bbea622..8e225bb3 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto @@ -51,6 +51,8 @@ message Gauge { message Counter { double value = 1; Exemplar exemplar = 2; + + google.protobuf.Timestamp created_timestamp = 3 [(gogoproto.nullable) = true]; } message Quantile { @@ -62,6 +64,8 @@ message Summary { uint64 sample_count = 1; double sample_sum = 2; repeated Quantile quantile = 3 [(gogoproto.nullable) = false]; + + google.protobuf.Timestamp created_timestamp = 4 [(gogoproto.nullable) = true]; } message Untyped { @@ -75,6 +79,8 @@ message Histogram { // Buckets for the conventional histogram. repeated Bucket bucket = 3 [(gogoproto.nullable) = false]; // Ordered in increasing order of upper_bound, +Inf bucket is optional. + google.protobuf.Timestamp created_timestamp = 15 [(gogoproto.nullable) = true]; + // Everything below here is for native histograms (also known as sparse histograms). // Native histograms are an experimental feature without stability guarantees. @@ -97,6 +103,9 @@ message Histogram { repeated double negative_count = 11; // Absolute count of each bucket. 
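// (Illustrative aside from the editor, not part of the upstream proto.)
// The sint64 delta fields in this message (negative_delta above,
// positive_delta below) are zigzag-encoded varints; the generated marshal
// code earlier in this diff computes exactly that as
// (uint64(num) << 1) ^ uint64(num >> 63). Deltas near zero, the common case
// for bucket counts, therefore encode in a single byte:
//
//	n:       0  -1   1  -2   2  -3   3
//	zigzag:  0   1   2   3   4   5   6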
// Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. Otherwise, it would be + // indistinguishable from a classic histogram. repeated BucketSpan positive_span = 12 [(gogoproto.nullable) = false]; // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float @@ -144,4 +153,4 @@ message MetricFamily { string help = 2; MetricType type = 3; repeated Metric metric = 4 [(gogoproto.nullable) = false]; -} +} \ No newline at end of file diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index ddfb26b1..161aa85a 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -44,7 +44,9 @@ import ( "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/stats" + "github.com/prometheus/prometheus/util/zeropool" ) const ( @@ -58,6 +60,11 @@ const ( maxInt64 = 9223372036854774784 // The smallest SampleValue that can be converted to an int64 without underflow. minInt64 = -9223372036854775808 + + // Max initial size for the pooled points slices. + // The getHPointSlice and getFPointSlice functions are called with an estimated size which often can be + // over-estimated. + maxPointsSliceSize = 5000 ) type engineMetrics struct { @@ -69,6 +76,7 @@ type engineMetrics struct { queryPrepareTime prometheus.Observer queryInnerEval prometheus.Observer queryResultSort prometheus.Observer + querySamples prometheus.Counter } // convertibleToInt64 returns true if v does not over-/underflow an int64. @@ -128,11 +136,35 @@ type Query interface { String() string } -type QueryOpts struct { +type PrometheusQueryOpts struct { // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. - EnablePerStepStats bool + enablePerStepStats bool // Lookback delta duration for this query. - LookbackDelta time.Duration + lookbackDelta time.Duration +} + +var _ QueryOpts = &PrometheusQueryOpts{} + +func NewPrometheusQueryOpts(enablePerStepStats bool, lookbackDelta time.Duration) QueryOpts { + return &PrometheusQueryOpts{ + enablePerStepStats: enablePerStepStats, + lookbackDelta: lookbackDelta, + } +} + +func (p *PrometheusQueryOpts) EnablePerStepStats() bool { + return p.enablePerStepStats +} + +func (p *PrometheusQueryOpts) LookbackDelta() time.Duration { + return p.lookbackDelta +} + +type QueryOpts interface { + // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. + EnablePerStepStats() bool + // Lookback delta duration for this query. + LookbackDelta() time.Duration } // query implements the Query interface. @@ -188,7 +220,8 @@ func (q *query) Cancel() { // Close implements the Query interface. 
func (q *query) Close() { for _, s := range q.matrix { - putPointSlice(s.Points) + putFPointSlice(s.Floats) + putHPointSlice(s.Histograms) } } @@ -331,6 +364,12 @@ func NewEngine(opts EngineOpts) *Engine { Name: "queries_concurrent_max", Help: "The max number of concurrent queries.", }), + querySamples: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_samples_total", + Help: "The total number of samples loaded by all queries.", + }), queryQueueTime: queryResultSummary.WithLabelValues("queue_time"), queryPrepareTime: queryResultSummary.WithLabelValues("prepare_time"), queryInnerEval: queryResultSummary.WithLabelValues("inner_eval"), @@ -356,6 +395,7 @@ func NewEngine(opts EngineOpts) *Engine { metrics.maxConcurrentQueries, metrics.queryLogEnabled, metrics.queryLogFailures, + metrics.querySamples, queryResultSummary, ) } @@ -398,69 +438,74 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { } // NewInstantQuery returns an evaluation query for the given expression at the given time. -func (ng *Engine) NewInstantQuery(q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { - expr, err := parser.ParseExpr(qs) +func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error) { + pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0) + finishQueue, err := ng.queueActive(ctx, qry) if err != nil { return nil, err } - qry, err := ng.newQuery(q, opts, expr, ts, ts, 0) + defer finishQueue() + expr, err := parser.ParseExpr(qs) if err != nil { return nil, err } - qry.q = qs + if err := ng.validateOpts(expr); err != nil { + return nil, err + } + *pExpr = PreprocessExpr(expr, ts, ts) return qry, nil } // NewRangeQuery returns an evaluation query for the given time range and with // the resolution set by the interval. -func (ng *Engine) NewRangeQuery(q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { +func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { + pExpr, qry := ng.newQuery(q, qs, opts, start, end, interval) + finishQueue, err := ng.queueActive(ctx, qry) + if err != nil { + return nil, err + } + defer finishQueue() expr, err := parser.ParseExpr(qs) if err != nil { return nil, err } + if err := ng.validateOpts(expr); err != nil { + return nil, err + } if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar { return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type())) } - qry, err := ng.newQuery(q, opts, expr, start, end, interval) - if err != nil { - return nil, err - } - qry.q = qs + *pExpr = PreprocessExpr(expr, start, end) return qry, nil } -func (ng *Engine) newQuery(q storage.Queryable, opts *QueryOpts, expr parser.Expr, start, end time.Time, interval time.Duration) (*query, error) { - if err := ng.validateOpts(expr); err != nil { - return nil, err - } - - // Default to empty QueryOpts if not provided. 
+func (ng *Engine) newQuery(q storage.Queryable, qs string, opts QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) { if opts == nil { - opts = &QueryOpts{} + opts = NewPrometheusQueryOpts(false, 0) } - lookbackDelta := opts.LookbackDelta + lookbackDelta := opts.LookbackDelta() if lookbackDelta <= 0 { lookbackDelta = ng.lookbackDelta } es := &parser.EvalStmt{ - Expr: PreprocessExpr(expr, start, end), Start: start, End: end, Interval: interval, LookbackDelta: lookbackDelta, } qry := &query{ + q: qs, stmt: es, ng: ng, stats: stats.NewQueryTimers(), - sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats), + sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats()), queryable: q, } - return qry, nil + return &es.Expr, qry } var ( @@ -534,9 +579,12 @@ func (ng *Engine) newTestQuery(f func(context.Context) error) Query { // // At this point per query only one EvalStmt is evaluated. Alert and record // statements are not handled by the Engine. -func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storage.Warnings, err error) { +func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annotations.Annotations, err error) { ng.metrics.currentQueries.Inc() - defer ng.metrics.currentQueries.Dec() + defer func() { + ng.metrics.currentQueries.Dec() + ng.metrics.querySamples.Add(float64(q.sampleStats.TotalSamples)) + }() ctx, cancel := context.WithTimeout(ctx, ng.timeout) q.cancel = cancel @@ -576,18 +624,11 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag execSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.ExecTotalTime) defer execSpanTimer.Finish() - queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime) - // Log query in active log. The active log guarantees that we don't run over - // MaxConcurrent queries. - if ng.activeQueryTracker != nil { - queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q) - if err != nil { - queueSpanTimer.Finish() - return nil, nil, contextErr(err, "query queue") - } - defer ng.activeQueryTracker.Delete(queryIndex) + finishQueue, err := ng.queueActive(ctx, q) + if err != nil { + return nil, nil, err } - queueSpanTimer.Finish() + defer finishQueue() // Cancel when execution is done or an error was raised. defer q.cancel() @@ -610,6 +651,18 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag panic(fmt.Errorf("promql.Engine.exec: unhandled statement of type %T", q.Statement())) } +// Log query in active log. The active log guarantees that we don't run over +// MaxConcurrent queries. +func (ng *Engine) queueActive(ctx context.Context, q *query) (func(), error) { + if ng.activeQueryTracker == nil { + return func() {}, nil + } + queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime) + queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q) + queueSpanTimer.Finish() + return func() { ng.activeQueryTracker.Delete(queryIndex) }, err +} + func timeMilliseconds(t time.Time) int64 { return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) } @@ -619,17 +672,17 @@ func durationMilliseconds(d time.Duration) int64 { } // execEvalStmt evaluates the expression of an evaluation statement for the given time range. 
-func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, storage.Warnings, error) { +func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, annotations.Annotations, error) { prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime) mint, maxt := ng.findMinMaxTime(s) - querier, err := query.queryable.Querier(ctxPrepare, mint, maxt) + querier, err := query.queryable.Querier(mint, maxt) if err != nil { prepareSpanTimer.Finish() return nil, nil, err } defer querier.Close() - ng.populateSeries(querier, s) + ng.populateSeries(ctxPrepare, querier, s) prepareSpanTimer.Finish() // Modify the offset of vector and matrix selectors for the @ modifier @@ -679,11 +732,15 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval for i, s := range mat { // Point might have a different timestamp, force it to the evaluation // timestamp as that is when we ran the evaluation. - vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, H: s.Points[0].H, T: start}} + if len(s.Histograms) > 0 { + vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start} + } else { + vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start} + } } return vector, warnings, nil case parser.ValueTypeScalar: - return Scalar{V: mat[0].Points[0].V, T: start}, warnings, nil + return Scalar{V: mat[0].Floats[0].F, T: start}, warnings, nil case parser.ValueTypeMatrix: return mat, warnings, nil default: @@ -740,8 +797,7 @@ func subqueryTimes(path []parser.Node) (time.Duration, time.Duration, *int64) { ts int64 = math.MaxInt64 ) for _, node := range path { - switch n := node.(type) { - case *parser.SubqueryExpr: + if n, ok := node.(*parser.SubqueryExpr); ok { subqOffset += n.OriginalOffset subqRange += n.Range if n.Timestamp != nil { @@ -777,7 +833,6 @@ func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) { maxTimestamp = end } evalRange = 0 - case *parser.MatrixSelector: evalRange = n.Range } @@ -810,20 +865,20 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS } else { offsetMilliseconds := durationMilliseconds(subqOffset) start = start - offsetMilliseconds - durationMilliseconds(subqRange) - end = end - offsetMilliseconds + end -= offsetMilliseconds } if evalRange == 0 { - start = start - durationMilliseconds(s.LookbackDelta) + start -= durationMilliseconds(s.LookbackDelta) } else { // For all matrix queries we want to ensure that we have (end-start) + range selected // this way we have `range` data before the start time - start = start - durationMilliseconds(evalRange) + start -= durationMilliseconds(evalRange) } offsetMilliseconds := durationMilliseconds(n.OriginalOffset) - start = start - offsetMilliseconds - end = end - offsetMilliseconds + start -= offsetMilliseconds + end -= offsetMilliseconds return start, end } @@ -831,8 +886,7 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS func (ng *Engine) getLastSubqueryInterval(path []parser.Node) time.Duration { var interval time.Duration for _, node := range path { - switch n := node.(type) { - case *parser.SubqueryExpr: + if n, ok := node.(*parser.SubqueryExpr); ok { interval = n.Step if n.Step == 0 { interval = time.Duration(ng.noStepSubqueryIntervalFn(durationMilliseconds(n.Range))) * time.Millisecond @@ -842,7 +896,7 @@ func (ng *Engine) getLastSubqueryInterval(path []parser.Node) time.Duration { 
return interval } -func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { +func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s *parser.EvalStmt) { // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range. // The evaluation of the VectorSelector inside then evaluates the given range and unsets // the variable. @@ -865,7 +919,7 @@ func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { } evalRange = 0 hints.By, hints.Grouping = extractGroupsFromPath(path) - n.UnexpandedSeriesSet = querier.Select(false, hints, n.LabelMatchers...) + n.UnexpandedSeriesSet = querier.Select(ctx, false, hints, n.LabelMatchers...) case *parser.MatrixSelector: evalRange = n.Range @@ -898,14 +952,13 @@ func extractGroupsFromPath(p []parser.Node) (bool, []string) { if len(p) == 0 { return false, nil } - switch n := p[len(p)-1].(type) { - case *parser.AggregateExpr: + if n, ok := p[len(p)-1].(*parser.AggregateExpr); ok { return !n.Without, n.Grouping } return false, nil } -func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (storage.Warnings, error) { +func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations.Annotations, error) { switch e := expr.(type) { case *parser.MatrixSelector: return checkAndExpandSeriesSet(ctx, e.VectorSelector) @@ -920,7 +973,7 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (storage.War return nil, nil } -func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, ws storage.Warnings, err error) { +func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, ws annotations.Annotations, err error) { for it.Next() { select { case <-ctx.Done(): @@ -934,14 +987,15 @@ func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.S type errWithWarnings struct { err error - warnings storage.Warnings + warnings annotations.Annotations } func (e errWithWarnings) Error() string { return e.err.Error() } -// An evaluator evaluates given expressions over given fixed timestamps. It -// is attached to an engine through which it connects to a querier and reports -// errors. On timeout or cancellation of its context it terminates. +// An evaluator evaluates the given expressions over the given fixed +// timestamps. It is attached to an engine through which it connects to a +// querier and reports errors. On timeout or cancellation of its context it +// terminates. type evaluator struct { ctx context.Context @@ -968,7 +1022,7 @@ func (ev *evaluator) error(err error) { } // recover is the handler that turns panics into returns from the top level of evaluation. -func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error) { +func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp *error) { e := recover() if e == nil { return @@ -984,7 +1038,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error *errp = fmt.Errorf("unexpected error: %w", err) case errWithWarnings: *errp = err.err - *ws = append(*ws, err.warnings...) 
+ ws.Merge(err.warnings) case error: *errp = err default: @@ -992,7 +1046,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error } } -func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings, err error) { +func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) { defer ev.recover(expr, &ws, &err) v, ws = ev.eval(expr) @@ -1061,19 +1115,19 @@ func (enh *EvalNodeHelper) DropMetricName(l labels.Labels) labels.Labels { // function call results. // The prepSeries function (if provided) can be used to prepare the helper // for each series, then passed to each call funcCall. -func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, storage.Warnings), exprs ...parser.Expr) (Matrix, storage.Warnings) { +func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 matrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs)) originalNumSamples := ev.currentSamples - var warnings storage.Warnings + var warnings annotations.Annotations for i, e := range exprs { // Functions will take string arguments from the expressions, not the values. if e != nil && e.Type() != parser.ValueTypeString { // ev.currentSamples will be updated to the correct value within the ev.eval call. val, ws := ev.eval(e) - warnings = append(warnings, ws...) + warnings.Merge(ws) matrixes[i] = val.(Matrix) // Keep a copy of the original point slices so that they @@ -1095,7 +1149,11 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } } enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen)} - seriess := make(map[uint64]Series, biggestLen) // Output series by series hash. + type seriesAndTimestamp struct { + Series + ts int64 + } + seriess := make(map[uint64]seriesAndTimestamp, biggestLen) // Output series by series hash. tempNumSamples := ev.currentSamples var ( @@ -1136,23 +1194,24 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } for si, series := range matrixes[i] { - for _, point := range series.Points { - if point.T == ts { - if ev.currentSamples < ev.maxSamples { - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, Point: point}) - if prepSeries != nil { - bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) - } - - // Move input vectors forward so we don't have to re-scan the same - // past points at the next step. - matrixes[i][si].Points = series.Points[1:] - ev.currentSamples++ - } else { - ev.error(ErrTooManySamples(env)) - } - } - break + switch { + case len(series.Floats) > 0 && series.Floats[0].T == ts: + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts}) + // Move input vectors forward so we don't have to re-scan the same + // past points at the next step. 
+ matrixes[i][si].Floats = series.Floats[1:] + case len(series.Histograms) > 0 && series.Histograms[0].T == ts: + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts}) + matrixes[i][si].Histograms = series.Histograms[1:] + default: + continue + } + if prepSeries != nil { + bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) + } + ev.currentSamples++ + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) } } args[i] = vectors[i] @@ -1162,11 +1221,8 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // Make the function call. enh.Ts = ts result, ws := funcCall(args, bufHelpers, enh) - if result.ContainsSameLabelset() { - ev.errorf("vector cannot contain metrics with the same labelset") - } enh.Out = result[:0] // Reuse result vector. - warnings = append(warnings, ws...) + warnings.Merge(ws) ev.currentSamples += len(result) // When we reset currentSamples to tempNumSamples during the next iteration of the loop it also @@ -1181,10 +1237,16 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // If this could be an instant query, shortcut so as not to change sort order. if ev.endTimestamp == ev.startTimestamp { + if result.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } mat := make(Matrix, len(result)) for i, s := range result { - s.Point.T = ts - mat[i] = Series{Metric: s.Metric, Points: []Point{s.Point}} + if s.H == nil { + mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}} + } else { + mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}} + } } ev.currentSamples = originalNumSamples + mat.TotalSamples() ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1195,29 +1257,40 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) for _, sample := range result { h := sample.Metric.Hash() ss, ok := seriess[h] - if !ok { - ss = Series{ - Metric: sample.Metric, - Points: getPointSlice(numSteps), + if ok { + if ss.ts == ts { // If we've seen this output series before at this timestamp, it's a duplicate. + ev.errorf("vector cannot contain metrics with the same labelset") + } + ss.ts = ts + } else { + ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts} + } + if sample.H == nil { + if ss.Floats == nil { + ss.Floats = getFPointSlice(numSteps) + } + ss.Floats = append(ss.Floats, FPoint{T: ts, F: sample.F}) + } else { + if ss.Histograms == nil { + ss.Histograms = getHPointSlice(numSteps) } + ss.Histograms = append(ss.Histograms, HPoint{T: ts, H: sample.H}) } - sample.Point.T = ts - ss.Points = append(ss.Points, sample.Point) seriess[h] = ss - } } // Reuse the original point slices. for _, m := range origMatrixes { for _, s := range m { - putPointSlice(s.Points) + putFPointSlice(s.Floats) + putHPointSlice(s.Histograms) } } // Assemble the output matrix. By the time we get here we know we don't have too many samples. mat := make(Matrix, 0, len(seriess)) for _, ss := range seriess { - mat = append(mat, ss) + mat = append(mat, ss.Series) } ev.currentSamples = originalNumSamples + mat.TotalSamples() ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1226,7 +1299,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // evalSubquery evaluates given SubqueryExpr and returns an equivalent // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. 
-func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, storage.Warnings) { +func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { samplesStats := ev.samplesStats // Avoid double counting samples when running a subquery, those samples will be counted in later stage. ev.samplesStats = ev.samplesStats.NewChild() @@ -1252,14 +1325,14 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele } totalSamples := 0 for _, s := range mat { - totalSamples += len(s.Points) + totalSamples += len(s.Floats) + len(s.Histograms) vs.Series = append(vs.Series, NewStorageSeries(s)) } return ms, totalSamples, ws } // eval evaluates the given expression as the given AST expression node requires. -func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { +func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotations) { // This is the top-level evaluation method. // Thus, we check for timeout/cancellation here. if err := contextDone(ev.ctx, "expression evaluation"); err != nil { @@ -1288,17 +1361,17 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { param := unwrapStepInvariantExpr(e.Param) unwrapParenExpr(¶m) if s, ok := param.(*parser.StringLiteral); ok { - return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.aggregation(e.Op, sortedGrouping, e.Without, s.Val, v[0].(Vector), sh[0], enh), nil + return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.aggregation(e, sortedGrouping, s.Val, v[0].(Vector), sh[0], enh) }, e.Expr) } - return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { var param float64 if e.Param != nil { - param = v[0].(Vector)[0].V + param = v[0].(Vector)[0].F } - return ev.aggregation(e.Op, sortedGrouping, e.Without, param, v[1].(Vector), sh[1], enh), nil + return ev.aggregation(e, sortedGrouping, param, v[1].(Vector), sh[1], enh) }, e.Param, e.Expr) case *parser.Call: @@ -1312,15 +1385,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { unwrapParenExpr(&arg) vs, ok := arg.(*parser.VectorSelector) if ok { - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - if vs.Timestamp != nil { - // This is a special case only for "timestamp" since the offset - // needs to be adjusted for every point. - vs.Offset = time.Duration(enh.Ts-*vs.Timestamp) * time.Millisecond - } - val, ws := ev.vectorSelector(vs, enh.Ts) - return call([]parser.Value{val}, e.Args, enh), ws - }) + return ev.rangeEvalTimestampFunctionOverVectorSelector(vs, call, e) } } @@ -1328,7 +1393,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { var ( matrixArgIndex int matrixArg bool - warnings storage.Warnings + warnings annotations.Annotations ) for i := range e.Args { unwrapParenExpr(&e.Args[i]) @@ -1346,7 +1411,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { // Replacing parser.SubqueryExpr with parser.MatrixSelector. 
val, totalSamples, ws := ev.evalSubquery(subq) e.Args[i] = val - warnings = append(warnings, ws...) + warnings.Merge(ws) defer func() { // subquery result takes space in the memory. Get rid of that at the end. val.VectorSelector.(*parser.VectorSelector).Series = nil @@ -1357,8 +1422,9 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } if !matrixArg { // Does not have a matrix argument. - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return call(v, e.Args, enh), warnings + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, annos := call(v, e.Args, enh) + return vec, warnings.Merge(annos) }, e.Args...) } @@ -1372,7 +1438,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { otherArgs[i] = val.(Matrix) otherInArgs[i] = Vector{Sample{}} inArgs[i] = otherInArgs[i] - warnings = append(warnings, ws...) + warnings.Merge(ws) } } @@ -1383,7 +1449,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { selVS := sel.VectorSelector.(*parser.VectorSelector) ws, err := checkAndExpandSeriesSet(ev.ctx, sel) - warnings = append(warnings, ws...) + warnings.Merge(ws) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), warnings}) } @@ -1395,7 +1461,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { stepRange = ev.interval } // Reuse objects across steps to save memory allocations. - points := getPointSlice(16) + var floats []FPoint + var histograms []HPoint inMatrix := make(Matrix, 1) inArgs[matrixArgIndex] = inMatrix enh := &EvalNodeHelper{Out: make(Vector, 0, 1)} @@ -1403,8 +1470,13 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { it := storage.NewBuffer(selRange) var chkIter chunkenc.Iterator for i, s := range selVS.Series { - ev.currentSamples -= len(points) - points = points[:0] + ev.currentSamples -= len(floats) + len(histograms) + if floats != nil { + floats = floats[:0] + } + if histograms != nil { + histograms = histograms[:0] + } chkIter = s.Iterator(chkIter) it.Reset(chkIter) metric := selVS.Series[i].Labels() @@ -1417,7 +1489,6 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } ss := Series{ Metric: metric, - Points: getPointSlice(numSteps), } inMatrix[0].Metric = selVS.Series[i].Labels() for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { @@ -1427,44 +1498,56 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { // when looking up the argument, as there will be no gaps. for j := range e.Args { if j != matrixArgIndex { - otherInArgs[j][0].V = otherArgs[j][0].Points[step].V + otherInArgs[j][0].F = otherArgs[j][0].Floats[step].F } } maxt := ts - offset mint := maxt - selRange // Evaluate the matrix selector for this series for this step. - points = ev.matrixIterSlice(it, mint, maxt, points) - if len(points) == 0 { + floats, histograms = ev.matrixIterSlice(it, mint, maxt, floats, histograms) + if len(floats)+len(histograms) == 0 { continue } - inMatrix[0].Points = points + inMatrix[0].Floats = floats + inMatrix[0].Histograms = histograms enh.Ts = ts // Make the function call. 
- outVec := call(inArgs, e.Args, enh) - ev.samplesStats.IncrementSamplesAtStep(step, int64(len(points))) + outVec, annos := call(inArgs, e.Args, enh) + warnings.Merge(annos) + ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+len(histograms))) + enh.Out = outVec[:0] if len(outVec) > 0 { - ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, H: outVec[0].Point.H, T: ts}) + if outVec[0].H == nil { + if ss.Floats == nil { + ss.Floats = getFPointSlice(numSteps) + } + ss.Floats = append(ss.Floats, FPoint{F: outVec[0].F, T: ts}) + } else { + if ss.Histograms == nil { + ss.Histograms = getHPointSlice(numSteps) + } + ss.Histograms = append(ss.Histograms, HPoint{H: outVec[0].H, T: ts}) + } } // Only buffer stepRange milliseconds from the second step on. it.ReduceDelta(stepRange) } - if len(ss.Points) > 0 { - if ev.currentSamples+len(ss.Points) <= ev.maxSamples { + if len(ss.Floats)+len(ss.Histograms) > 0 { + if ev.currentSamples+len(ss.Floats)+len(ss.Histograms) <= ev.maxSamples { mat = append(mat, ss) - ev.currentSamples += len(ss.Points) + ev.currentSamples += len(ss.Floats) + len(ss.Histograms) } else { ev.error(ErrTooManySamples(env)) } - } else { - putPointSlice(ss.Points) } ev.samplesStats.UpdatePeak(ev.currentSamples) } ev.samplesStats.UpdatePeak(ev.currentSamples) - ev.currentSamples -= len(points) - putPointSlice(points) + ev.currentSamples -= len(floats) + len(histograms) + putFPointSlice(floats) + putHPointSlice(histograms) // The absent_over_time function returns 0 or 1 series. So far, the matrix // contains multiple series. The following code will create a new series @@ -1473,7 +1556,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { steps := int(1 + (ev.endTimestamp-ev.startTimestamp)/ev.interval) // Iterate once to look for a complete series. 
for _, s := range mat { - if len(s.Points) == steps { + if len(s.Floats)+len(s.Histograms) == steps { return Matrix{}, warnings } } @@ -1481,7 +1564,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { found := map[int64]struct{}{} for i, s := range mat { - for _, p := range s.Points { + for _, p := range s.Floats { + found[p.T] = struct{}{} + } + for _, p := range s.Histograms { found[p.T] = struct{}{} } if i > 0 && len(found) == steps { @@ -1489,17 +1575,17 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } } - newp := make([]Point, 0, steps-len(found)) + newp := make([]FPoint, 0, steps-len(found)) for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { if _, ok := found[ts]; !ok { - newp = append(newp, Point{T: ts, V: 1}) + newp = append(newp, FPoint{T: ts, F: 1}) } } return Matrix{ Series{ Metric: createLabelsForAbsentFunction(e.Args[0]), - Points: newp, + Floats: newp, }, }, warnings } @@ -1519,8 +1605,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { if e.Op == parser.SUB { for i := range mat { mat[i].Metric = dropMetricName(mat[i].Metric) - for j := range mat[i].Points { - mat[i].Points[j].V = -mat[i].Points[j].V + for j := range mat[i].Floats { + mat[i].Floats[j].F = -mat[i].Floats[j].F } } if mat.ContainsSameLabelset() { @@ -1532,9 +1618,9 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case *parser.BinaryExpr: switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - val := scalarBinop(e.Op, v[0].(Vector)[0].Point.V, v[1].(Vector)[0].Point.V) - return append(enh.Out, Sample{Point: Point{V: val}}), nil + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) + return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) case lt == parser.ValueTypeVector && rt == parser.ValueTypeVector: // Function to compute the join signature for each series. 
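The hunks above and below all trace back to one data-model change in promql: the old mixed Point type (a float value V plus an optional histogram H) is split into FPoint and HPoint, and Series carries the two sample kinds in separate slices. As a rough orientation sketch — the real definitions live in promql/value.go, which is not part of this diff:

	// FPoint is a float sample, HPoint a native-histogram sample.
	type FPoint struct {
		T int64
		F float64
	}

	type HPoint struct {
		T int64
		H *histogram.FloatHistogram
	}

	// Series keeps floats and histograms apart, each slice pooled on its
	// own, which is why every len(s.Points) in this file becomes
	// len(s.Floats)+len(s.Histograms) and the pool helpers are duplicated
	// into get/putFPointSlice and get/putHPointSlice.
	type Series struct {
		Metric     labels.Labels
		Floats     []FPoint
		Histograms []HPoint
	}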
@@ -1545,37 +1631,37 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } switch e.Op { case parser.LAND: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LOR: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LUNLESS: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) default: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].Point.V}, false, e.ReturnBool, enh), nil + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].Point.V}, true, e.ReturnBool, enh), nil + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil }, e.LHS, e.RHS) } case *parser.NumberLiteral: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return append(enh.Out, Sample{Point: Point{V: e.Val}, Metric: labels.EmptyLabels()}), nil + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) case *parser.StringLiteral: @@ -1594,15 +1680,24 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { it.Reset(chkIter) ss := Series{ Metric: e.Series[i].Labels(), - 
Points: getPointSlice(numSteps), } for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { step++ - _, v, h, ok := ev.vectorSelectorSingle(it, e, ts) + _, f, h, ok := ev.vectorSelectorSingle(it, e, ts) if ok { if ev.currentSamples < ev.maxSamples { - ss.Points = append(ss.Points, Point{V: v, H: h, T: ts}) + if h == nil { + if ss.Floats == nil { + ss.Floats = getFPointSlice(numSteps) + } + ss.Floats = append(ss.Floats, FPoint{F: f, T: ts}) + } else { + if ss.Histograms == nil { + ss.Histograms = getHPointSlice(numSteps) + } + ss.Histograms = append(ss.Histograms, HPoint{H: h, T: ts}) + } ev.samplesStats.IncrementSamplesAtStep(step, 1) ev.currentSamples++ } else { @@ -1611,10 +1706,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } } - if len(ss.Points) > 0 { + if len(ss.Floats)+len(ss.Histograms) > 0 { mat = append(mat, ss) - } else { - putPointSlice(ss.Points) } } ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1686,7 +1779,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { res, ws := newEv.eval(e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) - for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts = ts + ev.interval { + for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { step++ ev.samplesStats.IncrementSamplesAtStep(step, newEv.samplesStats.TotalSamples) } @@ -1705,15 +1798,21 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { panic(fmt.Errorf("unexpected result in StepInvariantExpr evaluation: %T", expr)) } for i := range mat { - if len(mat[i].Points) != 1 { + if len(mat[i].Floats)+len(mat[i].Histograms) != 1 { panic(fmt.Errorf("unexpected number of samples")) } - for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts = ts + ev.interval { - mat[i].Points = append(mat[i].Points, Point{ - T: ts, - V: mat[i].Points[0].V, - H: mat[i].Points[0].H, - }) + for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts += ev.interval { + if len(mat[i].Floats) > 0 { + mat[i].Floats = append(mat[i].Floats, FPoint{ + T: ts, + F: mat[i].Floats[0].F, + }) + } else { + mat[i].Histograms = append(mat[i].Histograms, HPoint{ + T: ts, + H: mat[i].Histograms[0].H, + }) + } ev.currentSamples++ if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) @@ -1727,36 +1826,49 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { panic(fmt.Errorf("unhandled expression of type: %T", expr)) } -// vectorSelector evaluates a *parser.VectorSelector expression. 
-func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vector, storage.Warnings) { - ws, err := checkAndExpandSeriesSet(ev.ctx, node) +func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) { + ws, err := checkAndExpandSeriesSet(ev.ctx, vs) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } - vec := make(Vector, 0, len(node.Series)) - it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) - var chkIter chunkenc.Iterator - for i, s := range node.Series { - chkIter = s.Iterator(chkIter) - it.Reset(chkIter) - t, v, h, ok := ev.vectorSelectorSingle(it, node, ts) - if ok { - vec = append(vec, Sample{ - Metric: node.Series[i].Labels(), - Point: Point{V: v, H: h, T: t}, - }) + seriesIterators := make([]*storage.MemoizedSeriesIterator, len(vs.Series)) + for i, s := range vs.Series { + it := s.Iterator(nil) + seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)) + } - ev.currentSamples++ - ev.samplesStats.IncrementSamplesAtTimestamp(ts, 1) - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + if vs.Timestamp != nil { + // This is a special case for "timestamp()" when the @ modifier is used, to ensure that + // we return a point for each time step in this case. + // See https://github.com/prometheus/prometheus/issues/8433. + vs.Offset = time.Duration(enh.Ts-*vs.Timestamp) * time.Millisecond } - } - ev.samplesStats.UpdatePeak(ev.currentSamples) - return vec, ws + vec := make(Vector, 0, len(vs.Series)) + for i, s := range vs.Series { + it := seriesIterators[i] + t, f, h, ok := ev.vectorSelectorSingle(it, vs, enh.Ts) + if ok { + vec = append(vec, Sample{ + Metric: s.Labels(), + T: t, + F: f, + H: h, + }) + + ev.currentSamples++ + ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, 1) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + } + } + ev.samplesStats.UpdatePeak(ev.currentSamples) + vec, annos := call([]parser.Value{vec}, e.Args, enh) + return vec, ws.Merge(annos) + }) } // vectorSelectorSingle evaluates an instant vector for the iterator of one time series. @@ -1776,14 +1888,14 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no } case chunkenc.ValFloat: t, v = it.At() - case chunkenc.ValHistogram, chunkenc.ValFloatHistogram: + case chunkenc.ValFloatHistogram: t, h = it.AtFloatHistogram() default: panic(fmt.Errorf("unknown value type %v", valueType)) } if valueType == chunkenc.ValNone || t > refTime { var ok bool - t, v, _, h, ok = it.PeekPrev() + t, v, h, ok = it.PeekPrev() if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { return 0, 0, nil, false } @@ -1794,23 +1906,53 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no return t, v, h, true } -var pointPool = sync.Pool{} +var ( + fPointPool zeropool.Pool[[]FPoint] + hPointPool zeropool.Pool[[]HPoint] +) -func getPointSlice(sz int) []Point { - p := pointPool.Get() +func getFPointSlice(sz int) []FPoint { + if p := fPointPool.Get(); p != nil { + return p + } + + if sz > maxPointsSliceSize { + sz = maxPointsSliceSize + } + + return make([]FPoint, 0, sz) +} + +// putFPointSlice will return a FPoint slice of size max(maxPointsSliceSize, sz). 
+// This function is called with an estimated size which often can be over-estimated. +func putFPointSlice(p []FPoint) { if p != nil { - return p.([]Point) + fPointPool.Put(p[:0]) + } +} + +// getHPointSlice will return a HPoint slice of size max(maxPointsSliceSize, sz). +// This function is called with an estimated size which often can be over-estimated. +func getHPointSlice(sz int) []HPoint { + if p := hPointPool.Get(); p != nil { + return p } - return make([]Point, 0, sz) + + if sz > maxPointsSliceSize { + sz = maxPointsSliceSize + } + + return make([]HPoint, 0, sz) } -func putPointSlice(p []Point) { - //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. - pointPool.Put(p[:0]) +func putHPointSlice(p []HPoint) { + if p != nil { + hPointPool.Put(p[:0]) + } } // matrixSelector evaluates a *parser.MatrixSelector expression. -func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storage.Warnings) { +func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annotations.Annotations) { var ( vs = node.VectorSelector.(*parser.VectorSelector) @@ -1838,13 +1980,15 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag Metric: series[i].Labels(), } - ss.Points = ev.matrixIterSlice(it, mint, maxt, getPointSlice(16)) - ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, int64(len(ss.Points))) + ss.Floats, ss.Histograms = ev.matrixIterSlice(it, mint, maxt, nil, nil) + totalLen := int64(len(ss.Floats)) + int64(len(ss.Histograms)) + ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, totalLen) - if len(ss.Points) > 0 { + if totalLen > 0 { matrix = append(matrix, ss) } else { - putPointSlice(ss.Points) + putFPointSlice(ss.Floats) + putHPointSlice(ss.Histograms) } } return matrix, ws @@ -1858,24 +2002,54 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag // values). Any such points falling before mint are discarded; points that fall // into the [mint, maxt] range are retained; only points with later timestamps // are populated from the iterator. -func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, maxt int64, out []Point) []Point { - if len(out) > 0 && out[len(out)-1].T >= mint { +func (ev *evaluator) matrixIterSlice( + it *storage.BufferedSeriesIterator, mint, maxt int64, + floats []FPoint, histograms []HPoint, +) ([]FPoint, []HPoint) { + mintFloats, mintHistograms := mint, mint + + // First floats... + if len(floats) > 0 && floats[len(floats)-1].T >= mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; out[drop].T < mint; drop++ { + for drop = 0; floats[drop].T < mint; drop++ { // nolint:revive } ev.currentSamples -= drop - copy(out, out[drop:]) - out = out[:len(out)-drop] + copy(floats, floats[drop:]) + floats = floats[:len(floats)-drop] // Only append points with timestamps after the last timestamp we have. - mint = out[len(out)-1].T + 1 + mintFloats = floats[len(floats)-1].T + 1 } else { - ev.currentSamples -= len(out) - out = out[:0] + ev.currentSamples -= len(floats) + if floats != nil { + floats = floats[:0] + } + } + + // ...then the same for histograms. TODO(beorn7): Use generics? 
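The float half of this overlap handling, shown just above, is worth seeing in isolation: points still inside the new [mint, maxt] window are kept and shifted to the front, so only genuinely new samples have to be read from the iterator. A self-contained sketch with a local FPoint stand-in (the engine's sample accounting is omitted):

package main

import "fmt"

type FPoint struct {
	T int64
	F float64
}

// trimBefore drops points older than mint in place, retaining the overlap
// with the previous window, and reports the timestamp from which new
// samples must be appended.
func trimBefore(points []FPoint, mint int64) ([]FPoint, int64) {
	if len(points) == 0 || points[len(points)-1].T < mint {
		return points[:0], mint // no overlap: start over
	}
	var drop int
	for drop = 0; points[drop].T < mint; drop++ { // linear scan, as in the diff
	}
	copy(points, points[drop:])
	points = points[:len(points)-drop]
	// Only append points with timestamps after the last one we kept.
	return points, points[len(points)-1].T + 1
}

func main() {
	pts := []FPoint{{T: 1, F: 1}, {T: 2, F: 2}, {T: 3, F: 3}}
	pts, next := trimBefore(pts, 2)
	fmt.Println(pts, next) // [{2 2} {3 3}] 4
}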
+ if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint { + // There is an overlap between previous and current ranges, retain common + // points. In most such cases: + // (a) the overlap is significantly larger than the eval step; and/or + // (b) the number of samples is relatively small. + // so a linear search will be as fast as a binary search. + var drop int + for drop = 0; histograms[drop].T < mint; drop++ { // nolint:revive + } + ev.currentSamples -= drop + copy(histograms, histograms[drop:]) + histograms = histograms[:len(histograms)-drop] + // Only append points with timestamps after the last timestamp we have. + mintHistograms = histograms[len(histograms)-1].T + 1 + } else { + ev.currentSamples -= len(histograms) + if histograms != nil { + histograms = histograms[:0] + } } soughtValueType := it.Seek(maxt) @@ -1897,25 +2071,31 @@ loop: continue loop } // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mint { + if t >= mintHistograms { if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } ev.currentSamples++ - out = append(out, Point{T: t, H: h}) + if histograms == nil { + histograms = getHPointSlice(16) + } + histograms = append(histograms, HPoint{T: t, H: h}) } case chunkenc.ValFloat: - t, v := buf.At() - if value.IsStaleNaN(v) { + t, f := buf.At() + if value.IsStaleNaN(f) { continue loop } // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mint { + if t >= mintFloats { if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } ev.currentSamples++ - out = append(out, Point{T: t, V: v}) + if floats == nil { + floats = getFPointSlice(16) + } + floats = append(floats, FPoint{T: t, F: f}) } } } @@ -1927,21 +2107,27 @@ loop: if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } - out = append(out, Point{T: t, H: h}) + if histograms == nil { + histograms = getHPointSlice(16) + } + histograms = append(histograms, HPoint{T: t, H: h}) ev.currentSamples++ } case chunkenc.ValFloat: - t, v := it.At() - if t == maxt && !value.IsStaleNaN(v) { + t, f := it.At() + if t == maxt && !value.IsStaleNaN(f) { if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } - out = append(out, Point{T: t, V: v}) + if floats == nil { + floats = getFPointSlice(16) + } + floats = append(floats, FPoint{T: t, F: f}) ev.currentSamples++ } } ev.samplesStats.UpdatePeak(ev.currentSamples) - return out + return floats, histograms } func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { @@ -1969,13 +2155,13 @@ func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, } func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { - if matching.Card != parser.CardManyToMany { + switch { + case matching.Card != parser.CardManyToMany: panic("set operations must only use many-to-many matching") - } - if len(lhs) == 0 { // Short-circuit. + case len(lhs) == 0: // Short-circuit. enh.Out = append(enh.Out, rhs...) return enh.Out - } else if len(rhs) == 0 { + case len(rhs) == 0: enh.Out = append(enh.Out, lhs...) return enh.Out } @@ -2087,20 +2273,21 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * } // Account for potentially swapped sidedness. 
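Note how the loops above allocate the floats and histograms slices lazily, only for the sample kind actually encountered, pulling from a pool first. A sketch of the same get/put pattern with a plain sync.Pool standing in for the generic zeropool the engine uses; the initial capacity of 16 mirrors the diff:

package main

import (
	"fmt"
	"sync"
)

type FPoint struct {
	T int64
	F float64
}

var fPointPool = sync.Pool{}

// getFPointSlice returns a pooled slice if one is available and otherwise
// allocates one with the requested capacity.
func getFPointSlice(sz int) []FPoint {
	if p := fPointPool.Get(); p != nil {
		return p.([]FPoint)
	}
	return make([]FPoint, 0, sz)
}

// putFPointSlice resets the slice's length and returns it to the pool.
func putFPointSlice(p []FPoint) {
	if p != nil {
		fPointPool.Put(p[:0])
	}
}

func main() {
	var floats []FPoint // stays nil until a float sample shows up
	for _, f := range []float64{1, 2, 3} {
		if floats == nil {
			floats = getFPointSlice(16)
		}
		floats = append(floats, FPoint{F: f})
	}
	fmt.Println(len(floats), cap(floats))
	putFPointSlice(floats)
}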
- vl, vr := ls.V, rs.V + fl, fr := ls.F, rs.F hl, hr := ls.H, rs.H if matching.Card == parser.CardOneToMany { - vl, vr = vr, vl + fl, fr = fr, fl hl, hr = hr, hl } - value, histogramValue, keep := vectorElemBinop(op, vl, vr, hl, hr) - if returnBool { + floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr) + switch { + case returnBool: if keep { - value = 1.0 + floatValue = 1.0 } else { - value = 0.0 + floatValue = 0.0 } - } else if !keep { + case !keep: continue } metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh) @@ -2128,13 +2315,11 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * insertedSigs[insertSig] = struct{}{} } - if (hl != nil && hr != nil) || (hl == nil && hr == nil) { - // Both lhs and rhs are of same type. - enh.Out = append(enh.Out, Sample{ - Metric: metric, - Point: Point{V: value, H: histogramValue}, - }) - } + enh.Out = append(enh.Out, Sample{ + Metric: metric, + F: floatValue, + H: histogramValue, + }) } return enh.Out } @@ -2193,7 +2378,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } } - ret := enh.lb.Labels(labels.EmptyLabels()) + ret := enh.lb.Labels() enh.resultMetric[str] = ret return ret } @@ -2201,28 +2386,33 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { for _, lhsSample := range lhs { - lv, rv := lhsSample.V, rhs.V + lf, rf := lhsSample.F, rhs.V + var rh *histogram.FloatHistogram + lh := lhsSample.H // lhs always contains the Vector. If the original position was different // swap for calculating the value. if swap { - lv, rv = rv, lv + lf, rf = rf, lf + lh, rh = rh, lh } - value, _, keep := vectorElemBinop(op, lv, rv, nil, nil) + float, histogram, keep := vectorElemBinop(op, lf, rf, lh, rh) // Catch cases where the scalar is the LHS in a scalar-vector comparison operation. // We want to always keep the vector element value as the output value, even if it's on the RHS. if op.IsComparisonOperator() && swap { - value = rv + float = rf + histogram = rh } if returnBool { if keep { - value = 1.0 + float = 1.0 } else { - value = 0.0 + float = 0.0 } keep = true } if keep { - lhsSample.V = value + lhsSample.F = float + lhsSample.H = histogram if shouldDropMetricName(op) || returnBool { lhsSample.Metric = enh.DropMetricName(lhsSample.Metric) } @@ -2233,7 +2423,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala } func dropMetricName(l labels.Labels) labels.Labels { - return labels.NewBuilder(l).Del(labels.MetricName).Labels(labels.EmptyLabels()) + return labels.NewBuilder(l).Del(labels.MetricName).Labels() } // scalarBinop evaluates a binary operation between two Scalars. @@ -2277,16 +2467,33 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram // The histogram being added must have the larger schema // code (i.e. the higher resolution). if hrhs.Schema >= hlhs.Schema { - return 0, hlhs.Copy().Add(hrhs), true + return 0, hlhs.Copy().Add(hrhs).Compact(0), true } - return 0, hrhs.Copy().Add(hlhs), true + return 0, hrhs.Copy().Add(hlhs).Compact(0), true } return lhs + rhs, nil, true case parser.SUB: + if hlhs != nil && hrhs != nil { + // The histogram being subtracted must have the larger schema + // code (i.e. the higher resolution). 
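The schema rule in these ADD/SUB branches, and the scalar MUL/DIV branches that follow below, can be factored out as a helper. This is a hypothetical sketch using only the FloatHistogram methods that appear in this diff (Copy, Add, Mul, Compact); their signatures may differ in other Prometheus versions:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// addHistograms combines two native histograms the way vectorElemBinop
// does for +: the operand with the higher-resolution schema is added into
// a copy of the other one, and the result is compacted.
func addHistograms(a, b *histogram.FloatHistogram) *histogram.FloatHistogram {
	if b.Schema >= a.Schema {
		return a.Copy().Add(b).Compact(0)
	}
	return b.Copy().Add(a).Compact(0)
}

// scaleHistogram mirrors the new MUL case for histogram * scalar.
func scaleHistogram(h *histogram.FloatHistogram, s float64) *histogram.FloatHistogram {
	return h.Copy().Mul(s)
}

func main() {
	a := &histogram.FloatHistogram{Schema: 0, Count: 2, Sum: 10, ZeroCount: 2}
	b := &histogram.FloatHistogram{Schema: 0, Count: 1, Sum: 5, ZeroCount: 1}
	fmt.Println(addHistograms(a, b).Count, scaleHistogram(a, 2).Sum)
}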
+ if hrhs.Schema >= hlhs.Schema { + return 0, hlhs.Copy().Sub(hrhs).Compact(0), true + } + return 0, hrhs.Copy().Mul(-1).Add(hlhs).Compact(0), true + } return lhs - rhs, nil, true case parser.MUL: + if hlhs != nil && hrhs == nil { + return 0, hlhs.Copy().Mul(rhs), true + } + if hlhs == nil && hrhs != nil { + return 0, hrhs.Copy().Mul(lhs), true + } return lhs * rhs, nil, true case parser.DIV: + if hlhs != nil && hrhs == nil { + return 0, hlhs.Copy().Div(rhs), true + } return lhs / rhs, nil, true case parser.POW: return math.Pow(lhs, rhs), nil, true @@ -2314,9 +2521,10 @@ type groupedAggregation struct { hasFloat bool // Has at least 1 float64 sample aggregated. hasHistogram bool // Has at least 1 histogram sample aggregated. labels labels.Labels - value float64 + floatValue float64 histogramValue *histogram.FloatHistogram - mean float64 + floatMean float64 + histogramMean *histogram.FloatHistogram groupCount int heap vectorByValueHeap reverseHeap vectorByReverseValueHeap @@ -2324,7 +2532,10 @@ type groupedAggregation struct { // aggregation evaluates an aggregation operation on a Vector. The provided grouping labels // must be sorted. -func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector { +func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + op := e.Op + without := e.Without + annos := annotations.Annotations{} result := map[uint64]*groupedAggregation{} orderedResult := []*groupedAggregation{} var k int64 @@ -2335,7 +2546,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without } k = int64(f) if k < 1 { - return Vector{} + return Vector{}, annos } } var q float64 @@ -2366,8 +2577,8 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if op == parser.COUNT_VALUES { enh.resetBuilder(metric) - enh.lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) - metric = enh.lb.Labels(labels.EmptyLabels()) + enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64)) + metric = enh.lb.Labels() // We've changed the metric so we have to recompute the grouping key. recomputeGroupingKey = true @@ -2386,27 +2597,34 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if !ok { var m labels.Labels enh.resetBuilder(metric) - if without { + switch { + case without: enh.lb.Del(grouping...) enh.lb.Del(labels.MetricName) - m = enh.lb.Labels(labels.EmptyLabels()) - } else if len(grouping) > 0 { + m = enh.lb.Labels() + case len(grouping) > 0: enh.lb.Keep(grouping...) 
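The label manipulation in this switch, which continues just below, derives one output label set per group: without deletes the grouping labels plus the metric name, by keeps only the grouping labels, and a plain aggregation collapses to empty labels. A standalone sketch against the labels.Builder API the diff migrates to (the argument-less Labels(); other versions take a parameter):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// groupLabels derives the output label set for one input series,
// mirroring the without / by / default cases of the aggregation code.
func groupLabels(metric labels.Labels, grouping []string, without bool) labels.Labels {
	b := labels.NewBuilder(metric)
	switch {
	case without:
		b.Del(grouping...)
		b.Del(labels.MetricName)
		return b.Labels()
	case len(grouping) > 0:
		b.Keep(grouping...)
		return b.Labels()
	default:
		return labels.EmptyLabels()
	}
}

func main() {
	m := labels.FromStrings("__name__", "http_requests_total", "job", "api", "instance", "a")
	fmt.Println(groupLabels(m, []string{"job"}, false)) // {job="api"}
}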
- m = enh.lb.Labels(labels.EmptyLabels()) - } else { + m = enh.lb.Labels() + default: m = labels.EmptyLabels() } newAgg := &groupedAggregation{ labels: m, - value: s.V, - mean: s.V, + floatValue: s.F, + floatMean: s.F, groupCount: 1, } - if s.H == nil { + switch { + case s.H == nil: newAgg.hasFloat = true - } else if op == parser.SUM { + case op == parser.SUM: newAgg.histogramValue = s.H.Copy() newAgg.hasHistogram = true + case op == parser.AVG: + newAgg.histogramMean = s.H.Copy() + newAgg.hasHistogram = true + case op == parser.STDVAR || op == parser.STDDEV: + newAgg.groupCount = 0 } result[groupingKey] = newAgg @@ -2414,28 +2632,29 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without inputVecLen := int64(len(vec)) resultSize := k - if k > inputVecLen { + switch { + case k > inputVecLen: resultSize = inputVecLen - } else if k == 0 { + case k == 0: resultSize = 1 } switch op { case parser.STDVAR, parser.STDDEV: - result[groupingKey].value = 0 + result[groupingKey].floatValue = 0 case parser.TOPK, parser.QUANTILE: result[groupingKey].heap = make(vectorByValueHeap, 1, resultSize) result[groupingKey].heap[0] = Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, } case parser.BOTTOMK: result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 1, resultSize) result[groupingKey].reverseHeap[0] = Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, } case parser.GROUP: - result[groupingKey].value = 1 + result[groupingKey].floatValue = 1 } continue } @@ -2450,9 +2669,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if s.H.Schema >= group.histogramValue.Schema { group.histogramValue.Add(s.H) } else { - h := s.H.Copy() - h.Add(group.histogramValue) - group.histogramValue = h + group.histogramValue = s.H.Copy().Add(group.histogramValue) } } // Otherwise the aggregation contained floats @@ -2460,87 +2677,112 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without // point in copying the histogram in that case. } else { group.hasFloat = true - group.value += s.V + group.floatValue += s.F } case parser.AVG: group.groupCount++ - if math.IsInf(group.mean, 0) { - if math.IsInf(s.V, 0) && (group.mean > 0) == (s.V > 0) { - // The `mean` and `s.V` values are `Inf` of the same sign. They - // can't be subtracted, but the value of `mean` is correct - // already. - break + if s.H != nil { + group.hasHistogram = true + if group.histogramMean != nil { + left := s.H.Copy().Div(float64(group.groupCount)) + right := group.histogramMean.Copy().Div(float64(group.groupCount)) + // The histogram being added/subtracted must have + // an equal or larger schema. + if s.H.Schema >= group.histogramMean.Schema { + toAdd := right.Mul(-1).Add(left) + group.histogramMean.Add(toAdd) + } else { + toAdd := left.Sub(right) + group.histogramMean = toAdd.Add(group.histogramMean) + } } - if !math.IsInf(s.V, 0) && !math.IsNaN(s.V) { - // At this stage, the mean is an infinite. If the added - // value is neither an Inf or a Nan, we can keep that mean - // value. - // This is required because our calculation below removes - // the mean value, which would look like Inf += x - Inf and - // end up as a NaN. - break + // Otherwise the aggregation contained floats + // previously and will be invalid anyway. No + // point in copying the histogram in that case. 
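The float side of this running average, visible in the else branch that follows, divides each operand by the group count before subtracting, so the intermediate difference cannot overflow. A tiny sketch of that update rule and of why the naive form fails:

package main

import (
	"fmt"
	"math"
)

// meanInc folds value f into a running mean over count samples. Dividing
// each term before subtracting keeps intermediate results finite even
// when f and mean are both near the float64 limit.
func meanInc(mean, f float64, count int64) float64 {
	n := float64(count)
	return mean + f/n - mean/n
}

func main() {
	big := math.MaxFloat64
	fmt.Println(meanInc(big, -big, 2)) // 0: the true mean of {+big, -big}
	fmt.Println(big + (-big-big)/2)    // -Inf: the naive delta overflows first
}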
+ } else { + group.hasFloat = true + if math.IsInf(group.floatMean, 0) { + if math.IsInf(s.F, 0) && (group.floatMean > 0) == (s.F > 0) { + // The `floatMean` and `s.F` values are `Inf` of the same sign. They + // can't be subtracted, but the value of `floatMean` is correct + // already. + break + } + if !math.IsInf(s.F, 0) && !math.IsNaN(s.F) { + // At this stage, the mean is an infinite. If the added + // value is neither an Inf or a Nan, we can keep that mean + // value. + // This is required because our calculation below removes + // the mean value, which would look like Inf += x - Inf and + // end up as a NaN. + break + } } + // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. + group.floatMean += s.F/float64(group.groupCount) - group.floatMean/float64(group.groupCount) } - // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. - group.mean += s.V/float64(group.groupCount) - group.mean/float64(group.groupCount) case parser.GROUP: // Do nothing. Required to avoid the panic in `default:` below. case parser.MAX: - if group.value < s.V || math.IsNaN(group.value) { - group.value = s.V + if group.floatValue < s.F || math.IsNaN(group.floatValue) { + group.floatValue = s.F } case parser.MIN: - if group.value > s.V || math.IsNaN(group.value) { - group.value = s.V + if group.floatValue > s.F || math.IsNaN(group.floatValue) { + group.floatValue = s.F } case parser.COUNT, parser.COUNT_VALUES: group.groupCount++ case parser.STDVAR, parser.STDDEV: - group.groupCount++ - delta := s.V - group.mean - group.mean += delta / float64(group.groupCount) - group.value += delta * (s.V - group.mean) + if s.H == nil { // Ignore native histograms. + group.groupCount++ + delta := s.F - group.floatMean + group.floatMean += delta / float64(group.groupCount) + group.floatValue += delta * (s.F - group.floatMean) + } case parser.TOPK: - if int64(len(group.heap)) < k || group.heap[0].V < s.V || math.IsNaN(group.heap[0].V) { - if int64(len(group.heap)) == k { - if k == 1 { // For k==1 we can replace in-situ. - group.heap[0] = Sample{ - Point: Point{V: s.V}, - Metric: s.Metric, - } - break - } - heap.Pop(&group.heap) - } + // We build a heap of up to k elements, with the smallest element at heap[0]. + switch { + case int64(len(group.heap)) < k: heap.Push(&group.heap, &Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, }) + case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): + // This new element is bigger than the previous smallest element - overwrite that. + group.heap[0] = Sample{ + F: s.F, + Metric: s.Metric, + } + if k > 1 { + heap.Fix(&group.heap, 0) // Maintain the heap invariant. + } } case parser.BOTTOMK: - if int64(len(group.reverseHeap)) < k || group.reverseHeap[0].V > s.V || math.IsNaN(group.reverseHeap[0].V) { - if int64(len(group.reverseHeap)) == k { - if k == 1 { // For k==1 we can replace in-situ. - group.reverseHeap[0] = Sample{ - Point: Point{V: s.V}, - Metric: s.Metric, - } - break - } - heap.Pop(&group.reverseHeap) - } + // We build a heap of up to k elements, with the biggest element at heap[0]. + switch { + case int64(len(group.reverseHeap)) < k: heap.Push(&group.reverseHeap, &Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, }) + case group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)): + // This new element is smaller than the previous biggest element - overwrite that. 
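Both topk and bottomk now avoid the old pop-and-push pair: once k elements are buffered, the heap root is overwritten in place and heap.Fix restores the invariant in O(log k), exactly as the comment above describes. A float-only sketch of the topk variant (the NaN handling of the real code is elided):

package main

import (
	"container/heap"
	"fmt"
)

// minHeap keeps the smallest value at index 0, like vectorByValueHeap.
type minHeap []float64

func (h minHeap) Len() int           { return len(h) }
func (h minHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h minHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *minHeap) Push(x any)        { *h = append(*h, x.(float64)) }
func (h *minHeap) Pop() any {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// topK retains the k largest values seen so far.
func topK(values []float64, k int) []float64 {
	h := make(minHeap, 0, k)
	for _, v := range values {
		switch {
		case h.Len() < k:
			heap.Push(&h, v)
		case h[0] < v:
			// Bigger than the smallest of the current top k: overwrite
			// the root and restore the heap invariant.
			h[0] = v
			if k > 1 {
				heap.Fix(&h, 0)
			}
		}
	}
	return h
}

func main() {
	fmt.Println(topK([]float64{5, 1, 9, 3, 7}, 3)) // [5 7 9] in heap order
}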
+ group.reverseHeap[0] = Sample{ + F: s.F, + Metric: s.Metric, + } + if k > 1 { + heap.Fix(&group.reverseHeap, 0) // Maintain the heap invariant. + } } case parser.QUANTILE: @@ -2555,16 +2797,26 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, aggr := range orderedResult { switch op { case parser.AVG: - aggr.value = aggr.mean + if aggr.hasFloat && aggr.hasHistogram { + // We cannot aggregate histogram sample with a float64 sample. + metricName := aggr.labels.Get(labels.MetricName) + annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, e.Expr.PositionRange())) + continue + } + if aggr.hasHistogram { + aggr.histogramValue = aggr.histogramMean.Compact(0) + } else { + aggr.floatValue = aggr.floatMean + } case parser.COUNT, parser.COUNT_VALUES: - aggr.value = float64(aggr.groupCount) + aggr.floatValue = float64(aggr.groupCount) case parser.STDVAR: - aggr.value = aggr.value / float64(aggr.groupCount) + aggr.floatValue /= float64(aggr.groupCount) case parser.STDDEV: - aggr.value = math.Sqrt(aggr.value / float64(aggr.groupCount)) + aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount)) case parser.TOPK: // The heap keeps the lowest value on top, so reverse it. @@ -2574,7 +2826,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, v := range aggr.heap { enh.Out = append(enh.Out, Sample{ Metric: v.Metric, - Point: Point{V: v.V}, + F: v.F, }) } continue // Bypass default append. @@ -2587,29 +2839,38 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, v := range aggr.reverseHeap { enh.Out = append(enh.Out, Sample{ Metric: v.Metric, - Point: Point{V: v.V}, + F: v.F, }) } continue // Bypass default append. case parser.QUANTILE: - aggr.value = quantile(q, aggr.heap) + if math.IsNaN(q) || q < 0 || q > 1 { + annos.Add(annotations.NewInvalidQuantileWarning(q, e.Param.PositionRange())) + } + aggr.floatValue = quantile(q, aggr.heap) case parser.SUM: if aggr.hasFloat && aggr.hasHistogram { // We cannot aggregate histogram sample with a float64 sample. + metricName := aggr.labels.Get(labels.MetricName) + annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, e.Expr.PositionRange())) continue } + if aggr.hasHistogram { + aggr.histogramValue.Compact(0) + } default: // For other aggregations, we already have the right value. } enh.Out = append(enh.Out, Sample{ Metric: aggr.labels, - Point: Point{V: aggr.value, H: aggr.histogramValue}, + F: aggr.floatValue, + H: aggr.histogramValue, }) } - return enh.Out + return enh.Out, annos } // groupingKey builds and returns the grouping key for the given metric and @@ -2639,7 +2900,7 @@ func btos(b bool) float64 { // result of the op operation. 
 func shouldDropMetricName(op parser.ItemType) bool {
 	switch op {
-	case parser.ADD, parser.SUB, parser.DIV, parser.MUL, parser.POW, parser.MOD:
+	case parser.ADD, parser.SUB, parser.DIV, parser.MUL, parser.POW, parser.MOD, parser.ATAN2:
 		return true
 	default:
 		return false
@@ -2690,9 +2951,10 @@ func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr {
 func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
 	switch n := expr.(type) {
 	case *parser.VectorSelector:
-		if n.StartOrEnd == parser.START {
+		switch n.StartOrEnd {
+		case parser.START:
 			n.Timestamp = makeInt64Pointer(timestamp.FromTime(start))
-		} else if n.StartOrEnd == parser.END {
+		case parser.END:
 			n.Timestamp = makeInt64Pointer(timestamp.FromTime(end))
 		}
 		return n.Timestamp != nil
@@ -2749,9 +3011,10 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
 		if isInvariant {
 			n.Expr = newStepInvariantExpr(n.Expr)
 		}
-		if n.StartOrEnd == parser.START {
+		switch n.StartOrEnd {
+		case parser.START:
 			n.Timestamp = makeInt64Pointer(timestamp.FromTime(start))
-		} else if n.StartOrEnd == parser.END {
+		case parser.END:
 			n.Timestamp = makeInt64Pointer(timestamp.FromTime(end))
 		}
 		return n.Timestamp != nil
diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go
index 3da38ea0..8eb0cad8 100644
--- a/vendor/github.com/prometheus/prometheus/promql/functions.go
+++ b/vendor/github.com/prometheus/prometheus/promql/functions.go
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// nolint:revive // Many unused function arguments in this file by design.
 package promql
 
 import (
@@ -27,6 +28,8 @@ import (
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/promql/parser/posrange"
+	"github.com/prometheus/prometheus/util/annotations"
 )
 
 // FunctionCall is the type of a PromQL function implementation
@@ -50,82 +53,93 @@ import (
 // metrics, the timestamp are not needed.
 //
 // Scalar results should be returned as the value of a sample in a Vector.
-type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector
+type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations)
 
 // === time() float64 ===
-func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return Vector{Sample{Point: Point{
-		V: float64(enh.Ts) / 1000,
-	}}}
+func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return Vector{Sample{
+		F: float64(enh.Ts) / 1000,
+	}}, nil
 }
 
 // extrapolatedRate is a utility function for rate/increase/delta.
 // It calculates the rate (allowing for counter resets if isCounter is true),
 // extrapolates if the first/last sample is close to the boundary, and returns
 // the result as either per-second (if isRate is true) or overall.
-func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) Vector { +func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) { ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( - samples = vals[0].(Matrix)[0] - rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) - rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) - resultValue float64 - resultHistogram *histogram.FloatHistogram + samples = vals[0].(Matrix)[0] + rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) + rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) + resultFloat float64 + resultHistogram *histogram.FloatHistogram + firstT, lastT int64 + numSamplesMinusOne int + annos = annotations.Annotations{} ) - // No sense in trying to compute a rate without at least two points. Drop - // this Vector element. - if len(samples.Points) < 2 { - return enh.Out + // We need either at least two Histograms and no Floats, or at least two + // Floats and no Histograms to calculate a rate. Otherwise, drop this + // Vector element. + metricName := samples.Metric.Get(labels.MetricName) + if len(samples.Histograms) > 0 && len(samples.Floats) > 0 { + return enh.Out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) + } + + if isCounter && !strings.HasSuffix(metricName, "_total") && !strings.HasSuffix(metricName, "_sum") && !strings.HasSuffix(metricName, "_count") { + annos.Add(annotations.NewPossibleNonCounterInfo(metricName, args[0].PositionRange())) } - if samples.Points[0].H != nil { - resultHistogram = histogramRate(samples.Points, isCounter) + switch { + case len(samples.Histograms) > 1: + numSamplesMinusOne = len(samples.Histograms) - 1 + firstT = samples.Histograms[0].T + lastT = samples.Histograms[numSamplesMinusOne].T + var newAnnos annotations.Annotations + resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange()) if resultHistogram == nil { - // Points are a mix of floats and histograms, or the histograms - // are not compatible with each other. - // TODO(beorn7): find a way of communicating the exact reason - return enh.Out + // The histograms are not compatible with each other. + return enh.Out, annos.Merge(newAnnos) } - } else { - resultValue = samples.Points[len(samples.Points)-1].V - samples.Points[0].V - prevValue := samples.Points[0].V - // We have to iterate through everything even in the non-counter - // case because we have to check that everything is a float. - // TODO(beorn7): Find a way to check that earlier, e.g. by - // handing in a []FloatPoint and a []HistogramPoint separately. - for _, currPoint := range samples.Points[1:] { - if currPoint.H != nil { - return nil // Range contains a mix of histograms and floats. 
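The new float branch above accumulates counter resets explicitly: whenever a sample drops below its predecessor, the pre-reset value is added back before any extrapolation happens. A compact sketch of just that correction, with a local FPoint stand-in:

package main

import "fmt"

type FPoint struct {
	T int64
	F float64
}

// counterIncrease returns last minus first, corrected for counter resets,
// as in the isCounter branch of extrapolatedRate.
func counterIncrease(floats []FPoint) float64 {
	inc := floats[len(floats)-1].F - floats[0].F
	prev := floats[0].F
	for _, p := range floats[1:] {
		if p.F < prev { // counter reset: the counter restarted near zero
			inc += prev
		}
		prev = p.F
	}
	return inc
}

func main() {
	pts := []FPoint{{1, 10}, {2, 20}, {3, 5}, {4, 15}}
	fmt.Println(counterIncrease(pts)) // 25: 10 up to the reset, then 15 after it
}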
- } - if !isCounter { - continue - } - if currPoint.V < prevValue { - resultValue += prevValue + case len(samples.Floats) > 1: + numSamplesMinusOne = len(samples.Floats) - 1 + firstT = samples.Floats[0].T + lastT = samples.Floats[numSamplesMinusOne].T + resultFloat = samples.Floats[numSamplesMinusOne].F - samples.Floats[0].F + if !isCounter { + break + } + // Handle counter resets: + prevValue := samples.Floats[0].F + for _, currPoint := range samples.Floats[1:] { + if currPoint.F < prevValue { + resultFloat += prevValue } - prevValue = currPoint.V + prevValue = currPoint.F } + default: + // TODO: add RangeTooShortWarning + return enh.Out, annos } // Duration between first/last samples and boundary of range. - durationToStart := float64(samples.Points[0].T-rangeStart) / 1000 - durationToEnd := float64(rangeEnd-samples.Points[len(samples.Points)-1].T) / 1000 + durationToStart := float64(firstT-rangeStart) / 1000 + durationToEnd := float64(rangeEnd-lastT) / 1000 - sampledInterval := float64(samples.Points[len(samples.Points)-1].T-samples.Points[0].T) / 1000 - averageDurationBetweenSamples := sampledInterval / float64(len(samples.Points)-1) + sampledInterval := float64(lastT-firstT) / 1000 + averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne) // TODO(beorn7): Do this for histograms, too. - if isCounter && resultValue > 0 && samples.Points[0].V >= 0 { - // Counters cannot be negative. If we have any slope at - // all (i.e. resultValue went up), we can extrapolate - // the zero point of the counter. If the duration to the - // zero point is shorter than the durationToStart, we - // take the zero point as the start of the series, - // thereby avoiding extrapolation to negative counter - // values. - durationToZero := sampledInterval * (samples.Points[0].V / resultValue) + if isCounter && resultFloat > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 { + // Counters cannot be negative. If we have any slope at all + // (i.e. resultFloat went up), we can extrapolate the zero point + // of the counter. If the duration to the zero point is shorter + // than the durationToStart, we take the zero point as the start + // of the series, thereby avoiding extrapolation to negative + // counter values. + durationToZero := sampledInterval * (samples.Floats[0].F / resultFloat) if durationToZero < durationToStart { durationToStart = durationToZero } @@ -153,24 +167,23 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod factor /= ms.Range.Seconds() } if resultHistogram == nil { - resultValue *= factor + resultFloat *= factor } else { - resultHistogram.Scale(factor) + resultHistogram.Mul(factor) } - return append(enh.Out, Sample{ - Point: Point{V: resultValue, H: resultHistogram}, - }) + return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}), annos } // histogramRate is a helper function for extrapolatedRate. It requires // points[0] to be a histogram. It returns nil if any other Point in points is -// not a histogram. -func histogramRate(points []Point, isCounter bool) *histogram.FloatHistogram { - prev := points[0].H // We already know that this is a histogram. +// not a histogram, and a warning wrapped in an annotation in that case. +// Otherwise, it returns the calculated histogram and an empty annotation. 
+func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) { + prev := points[0].H last := points[len(points)-1].H if last == nil { - return nil // Range contains a mix of histograms and floats. + return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } minSchema := prev.Schema if last.Schema < minSchema { @@ -185,7 +198,7 @@ func histogramRate(points []Point, isCounter bool) *histogram.FloatHistogram { for _, currPoint := range points[1 : len(points)-1] { curr := currPoint.H if curr == nil { - return nil // Range contains a mix of histograms and floats. + return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } // TODO(trevorwhitney): Check if isCounter is consistent with curr.CounterResetHint. if !isCounter { @@ -211,57 +224,58 @@ func histogramRate(points []Point, isCounter bool) *histogram.FloatHistogram { } h.CounterResetHint = histogram.GaugeType - return h.Compact(0) + return h.Compact(0), nil } -// === delta(Matrix parser.ValueTypeMatrix) Vector === -func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return extrapolatedRate(vals, args, enh, false, false) } -// === rate(node parser.ValueTypeMatrix) Vector === -func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === rate(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return extrapolatedRate(vals, args, enh, true, true) } -// === increase(node parser.ValueTypeMatrix) Vector === -func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === increase(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return extrapolatedRate(vals, args, enh, true, false) } -// === irate(node parser.ValueTypeMatrix) Vector === -func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === irate(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return instantValue(vals, enh.Out, true) } -// === idelta(node model.ValMatrix) Vector === -func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === idelta(node model.ValMatrix) (Vector, Annotations) === +func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return instantValue(vals, enh.Out, false) } -func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { +func instantValue(vals []parser.Value, out Vector, isRate bool) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // No sense in trying to compute a rate without at least two points. Drop // this Vector element. 
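instantValue, which continues below, looks only at the last two float samples: idelta takes their raw difference, while irate treats a drop as a counter reset and converts the result to a per-second rate. A standalone sketch of that logic under the same assumptions:

package main

import "fmt"

type FPoint struct {
	T int64 // milliseconds
	F float64
}

// instantValue mirrors the last-two-samples logic: plain difference for
// idelta, reset-aware per-second rate for irate.
func instantValue(floats []FPoint, isRate bool) (float64, bool) {
	if len(floats) < 2 {
		return 0, false // not enough points
	}
	last := floats[len(floats)-1]
	prev := floats[len(floats)-2]

	v := last.F - prev.F
	if isRate && last.F < prev.F {
		v = last.F // counter reset: use the post-reset value
	}

	interval := last.T - prev.T
	if interval == 0 {
		return 0, false // avoid dividing by zero
	}
	if isRate {
		v /= float64(interval) / 1000 // convert ms to seconds
	}
	return v, true
}

func main() {
	pts := []FPoint{{T: 0, F: 10}, {T: 5000, F: 30}}
	fmt.Println(instantValue(pts, true)) // 4 per second over the last 5s
}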
- if len(samples.Points) < 2 { - return out + // TODO: add RangeTooShortWarning + if len(samples.Floats) < 2 { + return out, nil } - lastSample := samples.Points[len(samples.Points)-1] - previousSample := samples.Points[len(samples.Points)-2] + lastSample := samples.Floats[len(samples.Floats)-1] + previousSample := samples.Floats[len(samples.Floats)-2] var resultValue float64 - if isRate && lastSample.V < previousSample.V { + if isRate && lastSample.F < previousSample.F { // Counter reset. - resultValue = lastSample.V + resultValue = lastSample.F } else { - resultValue = lastSample.V - previousSample.V + resultValue = lastSample.F - previousSample.F } sampledInterval := lastSample.T - previousSample.T if sampledInterval == 0 { // Avoid dividing by 0. - return out + return out, nil } if isRate { @@ -269,9 +283,7 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { resultValue /= float64(sampledInterval) / 1000 } - return append(out, Sample{ - Point: Point{V: resultValue}, - }) + return append(out, Sample{F: resultValue}), nil } // Calculate the trend value at the given index i in raw data d. @@ -296,14 +308,14 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 { // data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects // how trends in historical data will affect the current data. A higher trend factor increases the influence. // of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing". -func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // The smoothing factor argument. - sf := vals[1].(Vector)[0].V + sf := vals[1].(Vector)[0].F // The trend factor argument. - tf := vals[2].(Vector)[0].V + tf := vals[2].(Vector)[0].F // Check that the input parameters are valid. if sf <= 0 || sf >= 1 { @@ -313,24 +325,24 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode panic(fmt.Errorf("invalid trend factor. Expected: 0 < tf < 1, got: %f", tf)) } - l := len(samples.Points) + l := len(samples.Floats) // Can't do the smoothing operation with less than two points. if l < 2 { - return enh.Out + return enh.Out, nil } var s0, s1, b float64 // Set initial values. - s1 = samples.Points[0].V - b = samples.Points[1].V - samples.Points[0].V + s1 = samples.Floats[0].F + b = samples.Floats[1].F - samples.Floats[0].F // Run the smoothing operation. var x, y float64 for i := 1; i < l; i++ { // Scale the raw value against the smoothing factor. - x = sf * samples.Points[i].V + x = sf * samples.Floats[i].F // Scale the last smoothed value with the trend at this point. 
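Putting the pieces of this smoothing loop together: each iteration blends the raw sample (weighted by sf) with the previous smoothed value plus trend (weighted by 1-sf), while the trend itself is re-estimated from the last two smoothed values. A condensed sketch of the loop under those assumptions, with calcTrendValue inlined for the i > 1 case:

package main

import "fmt"

// holtWinters performs double exponential smoothing over values.
// sf is the smoothing factor and tf the trend factor, both in (0, 1).
func holtWinters(values []float64, sf, tf float64) float64 {
	if len(values) < 2 {
		return 0 // needs at least two points, like the engine
	}
	var s0, s1, b float64
	s1 = values[0]
	b = values[1] - values[0]
	for i := 1; i < len(values); i++ {
		x := sf * values[i] // scale the raw value by the smoothing factor
		if i > 1 {
			b = tf*(s1-s0) + (1-tf)*b // update the trend estimate
		}
		y := (1 - sf) * (s1 + b) // scale the previous smoothed value plus trend
		s0, s1 = s1, x+y
	}
	return s1
}

func main() {
	fmt.Println(holtWinters([]float64{1, 2, 3, 4, 5}, 0.5, 0.5))
}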
b = calcTrendValue(i-1, tf, s0, s1, b) @@ -339,129 +351,154 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode s0, s1 = s1, x+y } - return append(enh.Out, Sample{ - Point: Point{V: s1}, - }) + return append(enh.Out, Sample{F: s1}), nil } -// === sort(node parser.ValueTypeVector) Vector === -func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === sort(node parser.ValueTypeVector) (Vector, Annotations) === +func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take descending sort with NaN first and // reverse it. byValueSorter := vectorByReverseValueHeap(vals[0].(Vector)) sort.Sort(sort.Reverse(byValueSorter)) - return Vector(byValueSorter) + return Vector(byValueSorter), nil } -// === sortDesc(node parser.ValueTypeVector) Vector === -func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) === +func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take ascending sort with NaN first and // reverse it. byValueSorter := vectorByValueHeap(vals[0].(Vector)) sort.Sort(sort.Reverse(byValueSorter)) - return Vector(byValueSorter) + return Vector(byValueSorter), nil } -// === clamp(Vector parser.ValueTypeVector, min, max Scalar) Vector === -func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === +func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].Point.V - max := vals[2].(Vector)[0].Point.V + min := vals[1].(Vector)[0].F + max := vals[2].(Vector)[0].F if max < min { - return enh.Out + return enh.Out, nil } for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: math.Max(min, math.Min(max, el.V))}, + F: math.Max(min, math.Min(max, el.F)), }) } - return enh.Out + return enh.Out, nil } -// === clamp_max(Vector parser.ValueTypeVector, max Scalar) Vector === -func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === +func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - max := vals[1].(Vector)[0].Point.V + max := vals[1].(Vector)[0].F for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: math.Min(max, el.V)}, + F: math.Min(max, el.F), }) } - return enh.Out + return enh.Out, nil } -// === clamp_min(Vector parser.ValueTypeVector, min Scalar) Vector === -func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === +func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].Point.V + min := vals[1].(Vector)[0].F for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: 
math.Max(min, el.V)}, + F: math.Max(min, el.F), }) } - return enh.Out + return enh.Out, nil } -// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) Vector === -func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) === +func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) // round returns a number rounded to toNearest. // Ties are solved by rounding up. toNearest := float64(1) if len(args) >= 2 { - toNearest = vals[1].(Vector)[0].Point.V + toNearest = vals[1].(Vector)[0].F } // Invert as it seems to cause fewer floating point accuracy issues. toNearestInverse := 1.0 / toNearest for _, el := range vec { - v := math.Floor(el.V*toNearestInverse+0.5) / toNearestInverse + f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: v}, + F: f, }) } - return enh.Out + return enh.Out, nil } // === Scalar(node parser.ValueTypeVector) Scalar === -func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { v := vals[0].(Vector) if len(v) != 1 { - return append(enh.Out, Sample{ - Point: Point{V: math.NaN()}, - }) + return append(enh.Out, Sample{F: math.NaN()}), nil } - return append(enh.Out, Sample{ - Point: Point{V: v[0].V}, - }) + return append(enh.Out, Sample{F: v[0].F}), nil } -func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func([]Point) float64) Vector { +func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { el := vals[0].(Matrix)[0] - return append(enh.Out, Sample{ - Point: Point{V: aggrFn(el.Points)}, - }) + return append(enh.Out, Sample{F: aggrFn(el)}) +} + +func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector { + el := vals[0].(Matrix)[0] + + return append(enh.Out, Sample{H: aggrFn(el)}) } -// === avg_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { +// === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := vals[0].(Matrix)[0] + if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { + metricName := firstSeries.Metric.Get(labels.MetricName) + return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) + } + if len(firstSeries.Floats) == 0 { + // The passed values only contain histograms. + return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { + count := 1 + mean := s.Histograms[0].H.Copy() + for _, h := range s.Histograms[1:] { + count++ + left := h.H.Copy().Div(float64(count)) + right := mean.Copy().Div(float64(count)) + // The histogram being added/subtracted must have + // an equal or larger schema. 
+			if h.H.Schema >= mean.Schema {
+				toAdd := right.Mul(-1).Add(left)
+				mean.Add(toAdd)
+			} else {
+				toAdd := left.Sub(right)
+				mean = toAdd.Add(mean)
+			}
+		}
+		return mean
+	}), nil
+	}
+	return aggrOverTime(vals, enh, func(s Series) float64 {
 		var mean, count, c float64
-		for _, v := range values {
+		for _, f := range s.Floats {
 			count++
 			if math.IsInf(mean, 0) {
-				if math.IsInf(v.V, 0) && (mean > 0) == (v.V > 0) {
-					// The `mean` and `v.V` values are `Inf` of the same sign. They
+				if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) {
+					// The `mean` and `f.F` values are `Inf` of the same sign. They
 					// can't be subtracted, but the value of `mean` is correct
 					// already.
 					continue
 				}
-				if !math.IsInf(v.V, 0) && !math.IsNaN(v.V) {
+				if !math.IsInf(f.F, 0) && !math.IsNaN(f.F) {
 					// At this stage, the mean is an infinite. If the added
 					// value is neither an Inf or a Nan, we can keep that mean
 					// value.
@@ -471,303 +508,377 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
 					continue
 				}
 			}
-			mean, c = kahanSumInc(v.V/count-mean/count, mean, c)
+			mean, c = kahanSumInc(f.F/count-mean/count, mean, c)
 		}
 
 		if math.IsInf(mean, 0) {
 			return mean
 		}
 		return mean + c
-	})
+	}), nil
 }
 
-// === count_over_time(Matrix parser.ValueTypeMatrix) Vector ===
-func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return aggrOverTime(vals, enh, func(values []Point) float64 {
-		return float64(len(values))
-	})
+// === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return aggrOverTime(vals, enh, func(s Series) float64 {
+		return float64(len(s.Floats) + len(s.Histograms))
+	}), nil
 }
 
-// === last_over_time(Matrix parser.ValueTypeMatrix) Vector ===
-func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	el := vals[0].(Matrix)[0]
 
+	var f FPoint
+	if len(el.Floats) > 0 {
+		f = el.Floats[len(el.Floats)-1]
+	}
+
+	var h HPoint
+	if len(el.Histograms) > 0 {
+		h = el.Histograms[len(el.Histograms)-1]
+	}
+
+	if h.H == nil || h.T < f.T {
+		return append(enh.Out, Sample{
+			Metric: el.Metric,
+			F:      f.F,
+		}), nil
+	}
 	return append(enh.Out, Sample{
 		Metric: el.Metric,
-		Point:  Point{V: el.Points[len(el.Points)-1].V},
-	})
-}
-
-// === max_over_time(Matrix parser.ValueTypeMatrix) Vector ===
-func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return aggrOverTime(vals, enh, func(values []Point) float64 {
-		max := values[0].V
-		for _, v := range values {
-			if v.V > max || math.IsNaN(max) {
-				max = v.V
+		H:      h.H,
+	}), nil
+}
+
+// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	if len(vals[0].(Matrix)[0].Floats) == 0 {
+		// TODO(beorn7): The passed values only contain
+		// histograms. max_over_time ignores histograms for now. If
+		// there are only histograms, we have to return without adding
+		// anything to enh.Out.
+ return enh.Out, nil + } + return aggrOverTime(vals, enh, func(s Series) float64 { + max := s.Floats[0].F + for _, f := range s.Floats { + if f.F > max || math.IsNaN(max) { + max = f.F } } return max - }) + }), nil } -// === min_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - min := values[0].V - for _, v := range values { - if v.V < min || math.IsNaN(min) { - min = v.V +// === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. min_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out, nil + } + return aggrOverTime(vals, enh, func(s Series) float64 { + min := s.Floats[0].F + for _, f := range s.Floats { + if f.F < min || math.IsNaN(min) { + min = f.F } } return min - }) + }), nil } -// === sum_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { +// === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := vals[0].(Matrix)[0] + if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { + metricName := firstSeries.Metric.Get(labels.MetricName) + return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) + } + if len(firstSeries.Floats) == 0 { + // The passed values only contain histograms. + return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { + sum := s.Histograms[0].H.Copy() + for _, h := range s.Histograms[1:] { + // The histogram being added must have + // an equal or larger schema. + if h.H.Schema >= sum.Schema { + sum.Add(h.H) + } else { + sum = h.H.Copy().Add(sum) + } + } + return sum + }), nil + } + return aggrOverTime(vals, enh, func(s Series) float64 { var sum, c float64 - for _, v := range values { - sum, c = kahanSumInc(v.V, sum, c) + for _, f := range s.Floats { + sum, c = kahanSumInc(f.F, sum, c) } if math.IsInf(sum, 0) { return sum } return sum + c - }) + }), nil } -// === quantile_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - q := vals[0].(Vector)[0].V +// === quantile_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + q := vals[0].(Vector)[0].F el := vals[1].(Matrix)[0] + if len(el.Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. quantile_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. 
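max_over_time and min_over_time above share one subtlety: the comparison also replaces the running value whenever it is NaN, so a leading NaN cannot poison the result. A minimal sketch of the max variant:

package main

import (
	"fmt"
	"math"
)

// maxOverTime scans float samples, replacing the running max whenever a
// larger value arrives or the current max is NaN.
func maxOverTime(values []float64) float64 {
	max := values[0]
	for _, v := range values {
		if v > max || math.IsNaN(max) {
			max = v
		}
	}
	return max
}

func main() {
	fmt.Println(maxOverTime([]float64{math.NaN(), 3, 1})) // 3, not NaN
}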
+ return enh.Out, nil + } - values := make(vectorByValueHeap, 0, len(el.Points)) - for _, v := range el.Points { - values = append(values, Sample{Point: Point{V: v.V}}) + annos := annotations.Annotations{} + if math.IsNaN(q) || q < 0 || q > 1 { + annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange())) } - return append(enh.Out, Sample{ - Point: Point{V: quantile(q, values)}, - }) + + values := make(vectorByValueHeap, 0, len(el.Floats)) + for _, f := range el.Floats { + values = append(values, Sample{F: f.F}) + } + return append(enh.Out, Sample{F: quantile(q, values)}), annos } -// === stddev_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { +// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. stddev_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out, nil + } + return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 var aux, cAux float64 - for _, v := range values { + for _, f := range s.Floats { count++ - delta := v.V - (mean + cMean) + delta := f.F - (mean + cMean) mean, cMean = kahanSumInc(delta/count, mean, cMean) - aux, cAux = kahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux) + aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return math.Sqrt((aux + cAux) / count) - }) + }), nil } -// === stdvar_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { +// === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. stdvar_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. 
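The stddev/stdvar accumulators above, like sum_over_time and avg_over_time, push every increment through kahanSumInc, which carries a compensation term alongside the running sum so low-order bits lost to float64 rounding are added back at the end. A sketch of such a compensated accumulator (Neumaier's variant; the engine's actual helper lives elsewhere in this file and may differ in detail):

package main

import (
	"fmt"
	"math"
)

// kahanSumInc adds inc to sum while tracking the running compensation c
// for lost low-order bits.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	if math.Abs(sum) >= math.Abs(inc) {
		c += (sum - t) + inc // low-order digits of inc were lost
	} else {
		c += (inc - t) + sum // low-order digits of sum were lost
	}
	return t, c
}

func main() {
	var sum, c float64
	for _, v := range []float64{1e16, 1, -1e16} {
		sum, c = kahanSumInc(v, sum, c)
	}
	fmt.Println(sum + c) // 1; naive summation would print 0
}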
-// === stdvar_over_time(Matrix parser.ValueTypeMatrix) Vector ===
-func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return aggrOverTime(vals, enh, func(values []Point) float64 {
+// === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	if len(vals[0].(Matrix)[0].Floats) == 0 {
+		// TODO(beorn7): The passed values only contain
+		// histograms. stdvar_over_time ignores histograms for now. If
+		// there are only histograms, we have to return without adding
+		// anything to enh.Out.
+		return enh.Out, nil
+	}
+	return aggrOverTime(vals, enh, func(s Series) float64 {
 		var count float64
 		var mean, cMean float64
 		var aux, cAux float64
-		for _, v := range values {
+		for _, f := range s.Floats {
 			count++
-			delta := v.V - (mean + cMean)
+			delta := f.F - (mean + cMean)
 			mean, cMean = kahanSumInc(delta/count, mean, cMean)
-			aux, cAux = kahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux)
+			aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux)
 		}
 		return (aux + cAux) / count
-	})
+	}), nil
 }
 
-// === absent(Vector parser.ValueTypeVector) Vector ===
-func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === absent(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	if len(vals[0].(Vector)) > 0 {
-		return enh.Out
+		return enh.Out, nil
 	}
 	return append(enh.Out, Sample{
 		Metric: createLabelsForAbsentFunction(args[0]),
-		Point:  Point{V: 1},
-	})
+		F:      1,
+	}), nil
 }
 
-// === absent_over_time(Vector parser.ValueTypeMatrix) Vector ===
+// === absent_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) ===
 // As this function has a matrix as argument, it does not get all the Series.
 // This function will return 1 if the matrix has at least one element.
 // Due to engine optimization, this function is only called when this condition is true.
 // Then, the engine post-processes the results to get the expected output.
-func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return append(enh.Out,
-		Sample{
-			Point: Point{V: 1},
-		})
+func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return append(enh.Out, Sample{F: 1}), nil
 }
 
-// === present_over_time(Vector parser.ValueTypeMatrix) Vector ===
-func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return aggrOverTime(vals, enh, func(values []Point) float64 {
+// === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return aggrOverTime(vals, enh, func(s Series) float64 {
 		return 1
-	})
+	}), nil
 }
 
 func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
 	for _, el := range vals[0].(Vector) {
-		enh.Out = append(enh.Out, Sample{
-			Metric: enh.DropMetricName(el.Metric),
-			Point:  Point{V: f(el.V)},
-		})
+		if el.H == nil { // Process only float samples.
+			enh.Out = append(enh.Out, Sample{
+				Metric: enh.DropMetricName(el.Metric),
+				F:      f(el.F),
+			})
+		}
 	}
 	return enh.Out
 }
 
-// === abs(Vector parser.ValueTypeVector) Vector ===
-func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Abs)
+// === abs(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Abs), nil
 }
 
-// === ceil(Vector parser.ValueTypeVector) Vector ===
-func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Ceil)
+// === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Ceil), nil
 }
 
-// === floor(Vector parser.ValueTypeVector) Vector ===
-func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Floor)
+// === floor(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Floor), nil
 }
 
-// === exp(Vector parser.ValueTypeVector) Vector ===
-func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Exp)
+// === exp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Exp), nil
 }
 
-// === sqrt(Vector VectorNode) Vector ===
-func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Sqrt)
+// === sqrt(Vector VectorNode) (Vector, Annotations) ===
+func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Sqrt), nil
 }
 
-// === ln(Vector parser.ValueTypeVector) Vector ===
-func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Log)
+// === ln(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Log), nil
 }
 
-// === log2(Vector parser.ValueTypeVector) Vector ===
-func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Log2)
+// === log2(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Log2), nil
 }
 
-// === log10(Vector parser.ValueTypeVector) Vector ===
-func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Log10)
+// === log10(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Log10), nil
 }
 
-// === sin(Vector parser.ValueTypeVector) Vector ===
-func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Sin)
+// === sin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Sin), nil
 }
 
-// === cos(Vector parser.ValueTypeVector) Vector ===
-func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Cos)
+// === cos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Cos), nil
 }
 
-// === tan(Vector parser.ValueTypeVector) Vector ===
-func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Tan)
+// === tan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Tan), nil
 }
 
-// == asin(Vector parser.ValueTypeVector) Vector ===
-func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Asin)
+// == asin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Asin), nil
 }
 
-// == acos(Vector parser.ValueTypeVector) Vector ===
-func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Acos)
+// == acos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Acos), nil
 }
 
-// == atan(Vector parser.ValueTypeVector) Vector ===
-func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Atan)
+// == atan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Atan), nil
 }
 
-// == sinh(Vector parser.ValueTypeVector) Vector ===
-func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Sinh)
+// == sinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Sinh), nil
 }
 
-// == cosh(Vector parser.ValueTypeVector) Vector ===
-func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Cosh)
+// == cosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Cosh), nil
 }
 
-// == tanh(Vector parser.ValueTypeVector) Vector ===
-func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Tanh)
+// == tanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Tanh), nil
 }
 
-// == asinh(Vector parser.ValueTypeVector) Vector ===
-func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Asinh)
+// == asinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Asinh), nil
 }
 
-// == acosh(Vector parser.ValueTypeVector) Vector ===
-func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Acosh)
+// == acosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Acosh), nil
 }
 
-// == atanh(Vector parser.ValueTypeVector) Vector ===
-func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return simpleFunc(vals, enh, math.Atanh)
+// == atanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFunc(vals, enh, math.Atanh), nil
 }
 
-// === rad(Vector parser.ValueTypeVector) Vector ===
-func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === rad(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return simpleFunc(vals, enh, func(v float64) float64 {
 		return v * math.Pi / 180
-	})
+	}), nil
 }
 
-// === deg(Vector parser.ValueTypeVector) Vector ===
-func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === deg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return simpleFunc(vals, enh, func(v float64) float64 {
 		return v * 180 / math.Pi
-	})
+	}), nil
 }
 
 // === pi() Scalar ===
-func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	return Vector{Sample{Point: Point{
-		V: math.Pi,
-	}}}
+func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return Vector{Sample{F: math.Pi}}, nil
 }
 
-// === sgn(Vector parser.ValueTypeVector) Vector ===
-func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return simpleFunc(vals, enh, func(v float64) float64 {
-		if v < 0 {
+		switch {
+		case v < 0:
 			return -1
-		} else if v > 0 {
+		case v > 0:
 			return 1
+		default:
+			return v
 		}
-		return v
-	})
+	}), nil
 }
 
-// === timestamp(Vector parser.ValueTypeVector) Vector ===
-func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	vec := vals[0].(Vector)
 	for _, el := range vec {
 		enh.Out = append(enh.Out, Sample{
 			Metric: enh.DropMetricName(el.Metric),
-			Point:  Point{V: float64(el.T) / 1000},
+			F:      float64(el.T) / 1000,
 		})
 	}
-	return enh.Out
+	return enh.Out, nil
 }
 
 func kahanSum(samples []float64) float64 {
@@ -793,7 +904,7 @@ func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
 // linearRegression performs a least-square linear regression analysis on the
 // provided SamplePairs. It returns the slope, and the intercept value at the
 // provided time.
-func linearRegression(samples []Point, interceptTime int64) (slope, intercept float64) {
+func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept float64) {
 	var (
 		n          float64
 		sumX, cX   float64
@@ -803,18 +914,18 @@ func linearRegression(samples []Point, interceptTime int64) (slope, intercept fl
 		initY float64
 		constY bool
 	)
-	initY = samples[0].V
+	initY = samples[0].F
 	constY = true
 	for i, sample := range samples {
 		// Set constY to false if any new y values are encountered.
-		if constY && i > 0 && sample.V != initY {
+		if constY && i > 0 && sample.F != initY {
 			constY = false
 		}
 		n += 1.0
 		x := float64(sample.T-interceptTime) / 1e3
 		sumX, cX = kahanSumInc(x, sumX, cX)
-		sumY, cY = kahanSumInc(sample.V, sumY, cY)
-		sumXY, cXY = kahanSumInc(x*sample.V, sumXY, cXY)
+		sumY, cY = kahanSumInc(sample.F, sumY, cY)
+		sumXY, cXY = kahanSumInc(x*sample.F, sumXY, cXY)
 		sumX2, cX2 = kahanSumInc(x*x, sumX2, cX2)
 	}
 	if constY {
@@ -823,10 +934,10 @@ func linearRegression(samples []Point, interceptTime int64) (slope, intercept fl
 		}
 		return 0, initY
 	}
-	sumX = sumX + cX
-	sumY = sumY + cY
-	sumXY = sumXY + cXY
-	sumX2 = sumX2 + cX2
+	sumX += cX
+	sumY += cY
+	sumXY += cXY
+	sumX2 += cX2
 
 	covXY := sumXY - sumX*sumY/n
 	varX := sumX2 - sumX*sumX/n
@@ -836,43 +947,39 @@ func linearRegression(samples []Point, interceptTime int64) (slope, intercept fl
 
 	return slope, intercept
 }
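deriv and predict_linear below both reduce to this least-squares fit. A small standalone sketch of the same covariance formulas, without Kahan compensation and with a hypothetical point type in place of FPoint:

package main

import "fmt"

type point struct {
	t int64   // timestamp in milliseconds
	v float64 // sample value
}

// slopeIntercept is an uncompensated version of the least-squares fit used
// by deriv/predict_linear: x is seconds relative to interceptTime.
func slopeIntercept(samples []point, interceptTime int64) (slope, intercept float64) {
	var n, sumX, sumY, sumXY, sumX2 float64
	for _, s := range samples {
		x := float64(s.t-interceptTime) / 1e3
		n++
		sumX += x
		sumY += s.v
		sumXY += x * s.v
		sumX2 += x * x
	}
	covXY := sumXY - sumX*sumY/n
	varX := sumX2 - sumX*sumX/n
	slope = covXY / varX
	intercept = sumY/n - slope*sumX/n
	return slope, intercept
}

func main() {
	// A counter growing by 2 per second, worth 10 at t=0.
	samples := []point{{0, 10}, {1000, 12}, {2000, 14}, {3000, 16}}
	fmt.Println(slopeIntercept(samples, 0)) // 2 10
}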
-// === deriv(node parser.ValueTypeMatrix) Vector ===
-func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	samples := vals[0].(Matrix)[0]
 
 	// No sense in trying to compute a derivative without at least two points.
 	// Drop this Vector element.
-	if len(samples.Points) < 2 {
-		return enh.Out
+	if len(samples.Floats) < 2 {
+		return enh.Out, nil
 	}
 
 	// We pass in an arbitrary timestamp that is near the values in use
 	// to avoid floating point accuracy issues, see
 	// https://github.com/prometheus/prometheus/issues/2674
-	slope, _ := linearRegression(samples.Points, samples.Points[0].T)
-	return append(enh.Out, Sample{
-		Point: Point{V: slope},
-	})
+	slope, _ := linearRegression(samples.Floats, samples.Floats[0].T)
+	return append(enh.Out, Sample{F: slope}), nil
 }
 
-// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) Vector ===
-func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) (Vector, Annotations) ===
+func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	samples := vals[0].(Matrix)[0]
-	duration := vals[1].(Vector)[0].V
+	duration := vals[1].(Vector)[0].F
 
 	// No sense in trying to predict anything without at least two points.
 	// Drop this Vector element.
-	if len(samples.Points) < 2 {
-		return enh.Out
+	if len(samples.Floats) < 2 {
+		return enh.Out, nil
 	}
-	slope, intercept := linearRegression(samples.Points, enh.Ts)
+	slope, intercept := linearRegression(samples.Floats, enh.Ts)
 
-	return append(enh.Out, Sample{
-		Point: Point{V: slope*duration + intercept},
-	})
+	return append(enh.Out, Sample{F: slope*duration + intercept}), nil
 }
 
-// === histogram_count(Vector parser.ValueTypeVector) Vector ===
-func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	inVec := vals[0].(Vector)
 
@@ -882,14 +989,14 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN
 		}
 		enh.Out = append(enh.Out, Sample{
 			Metric: enh.DropMetricName(sample.Metric),
-			Point:  Point{V: sample.H.Count},
+			F:      sample.H.Count,
 		})
 	}
-	return enh.Out
+	return enh.Out, nil
 }
 
-// === histogram_sum(Vector parser.ValueTypeVector) Vector ===
-func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	inVec := vals[0].(Vector)
 
@@ -899,16 +1006,82 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod
 		}
 		enh.Out = append(enh.Out, Sample{
 			Metric: enh.DropMetricName(sample.Metric),
-			Point:  Point{V: sample.H.Sum},
+			F:      sample.H.Sum,
 		})
 	}
-	return enh.Out
+	return enh.Out, nil
+}
+
+// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	inVec := vals[0].(Vector)
+
+	for _, sample := range inVec {
+		// Skip non-histogram samples.
+		if sample.H == nil {
+			continue
+		}
+		mean := sample.H.Sum / sample.H.Count
+		var variance, cVariance float64
+		it := sample.H.AllBucketIterator()
+		for it.Next() {
+			bucket := it.At()
+			var val float64
+			if bucket.Lower <= 0 && 0 <= bucket.Upper {
+				val = 0
+			} else {
+				val = math.Sqrt(bucket.Upper * bucket.Lower)
+			}
+			delta := val - mean
+			variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
+		}
+		variance += cVariance
+		variance /= sample.H.Count
+		enh.Out = append(enh.Out, Sample{
+			Metric: enh.DropMetricName(sample.Metric),
+			F:      math.Sqrt(variance),
+		})
+	}
+	return enh.Out, nil
 }
 
-// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector ===
-func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	lower := vals[0].(Vector)[0].V
-	upper := vals[1].(Vector)[0].V
+// === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	inVec := vals[0].(Vector)
+
+	for _, sample := range inVec {
+		// Skip non-histogram samples.
+		if sample.H == nil {
+			continue
+		}
+		mean := sample.H.Sum / sample.H.Count
+		var variance, cVariance float64
+		it := sample.H.AllBucketIterator()
+		for it.Next() {
+			bucket := it.At()
+			var val float64
+			if bucket.Lower <= 0 && 0 <= bucket.Upper {
+				val = 0
+			} else {
+				val = math.Sqrt(bucket.Upper * bucket.Lower)
+			}
+			delta := val - mean
+			variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
+		}
+		variance += cVariance
+		variance /= sample.H.Count
+		enh.Out = append(enh.Out, Sample{
+			Metric: enh.DropMetricName(sample.Metric),
+			F:      variance,
+		})
+	}
+	return enh.Out, nil
+}
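Both new functions estimate a bucket's representative value as the geometric mean of its bounds, sqrt(Lower * Upper), with any bucket spanning zero contributing zero. A standalone sketch of that variance estimate, using a hypothetical bucket struct in place of the FloatHistogram bucket iterator (and no Kahan compensation):

package main

import (
	"fmt"
	"math"
)

// bucket is a hypothetical stand-in for the value yielded by
// FloatHistogram.AllBucketIterator in the vendored code.
type bucket struct {
	lower, upper, count float64
}

// histogramVariance mirrors the loop in funcHistogramStdVar: each bucket is
// represented by the geometric mean of its bounds (0 if it spans zero), and
// its squared deviation from the histogram mean is weighted by its count.
func histogramVariance(buckets []bucket, sum, count float64) float64 {
	mean := sum / count
	var variance float64
	for _, b := range buckets {
		val := 0.0
		if !(b.lower <= 0 && 0 <= b.upper) {
			val = math.Sqrt(b.upper * b.lower)
		}
		delta := val - mean
		variance += b.count * delta * delta
	}
	return variance / count
}

func main() {
	buckets := []bucket{{1, 2, 3}, {2, 4, 7}}
	v := histogramVariance(buckets, 25, 10)
	fmt.Println(v, math.Sqrt(v)) // stdvar and stddev estimates
}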
+
+// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	lower := vals[0].(Vector)[0].F
+	upper := vals[1].(Vector)[0].F
 	inVec := vals[2].(Vector)
 
 	for _, sample := range inVec {
@@ -918,16 +1091,21 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev
 		}
 		enh.Out = append(enh.Out, Sample{
 			Metric: enh.DropMetricName(sample.Metric),
-			Point:  Point{V: histogramFraction(lower, upper, sample.H)},
+			F:      histogramFraction(lower, upper, sample.H),
 		})
 	}
-	return enh.Out
+	return enh.Out, nil
 }
 
-// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector ===
-func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	q := vals[0].(Vector)[0].V
+// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	q := vals[0].(Vector)[0].F
 	inVec := vals[1].(Vector)
+	annos := annotations.Annotations{}
+
+	if math.IsNaN(q) || q < 0 || q > 1 {
+		annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
+	}
 
 	if enh.signatureToMetricWithBuckets == nil {
 		enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{}
@@ -951,8 +1129,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
 				sample.Metric.Get(model.BucketLabel), 64,
 			)
 			if err != nil {
-				// Oops, no bucket label or malformed label value. Skip.
-				// TODO(beorn7): Issue a warning somehow.
+				annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), args[1].PositionRange()))
 				continue
 			}
 			enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel)
@@ -960,12 +1137,12 @@
 			if !ok {
 				sample.Metric = labels.NewBuilder(sample.Metric).
 					Del(excludedLabels...).
-					Labels(labels.EmptyLabels())
+					Labels()
 
 				mb = &metricWithBuckets{sample.Metric, nil}
 				enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb
 			}
-			mb.buckets = append(mb.buckets, bucket{upperBound, sample.V})
+			mb.buckets = append(mb.buckets, bucket{upperBound, sample.F})
 		}
 
@@ -978,69 +1155,88 @@
 			// At this data point, we have conventional histogram
 			// buckets and a native histogram with the same name and
 			// labels. Do not evaluate anything.
-			// TODO(beorn7): Issue a warning somehow.
+			annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), args[1].PositionRange()))
 			delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf))
 			continue
 		}
 
 		enh.Out = append(enh.Out, Sample{
 			Metric: enh.DropMetricName(sample.Metric),
-			Point:  Point{V: histogramQuantile(q, sample.H)},
+			F:      histogramQuantile(q, sample.H),
 		})
 	}
 
 	for _, mb := range enh.signatureToMetricWithBuckets {
 		if len(mb.buckets) > 0 {
+			res, forcedMonotonicity := bucketQuantile(q, mb.buckets)
 			enh.Out = append(enh.Out, Sample{
 				Metric: mb.metric,
-				Point:  Point{V: bucketQuantile(q, mb.buckets)},
+				F:      res,
 			})
+			if forcedMonotonicity {
+				annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(mb.metric.Get(labels.MetricName), args[1].PositionRange()))
+			}
 		}
 	}
 
-	return enh.Out
+	return enh.Out, annos
 }
 
-// === resets(Matrix parser.ValueTypeMatrix) Vector ===
-func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	samples := vals[0].(Matrix)[0]
-
+// === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	floats := vals[0].(Matrix)[0].Floats
+	histograms := vals[0].(Matrix)[0].Histograms
 	resets := 0
-	prev := samples.Points[0].V
-	for _, sample := range samples.Points[1:] {
-		current := sample.V
-		if current < prev {
-			resets++
+
+	if len(floats) > 1 {
+		prev := floats[0].F
+		for _, sample := range floats[1:] {
+			current := sample.F
+			if current < prev {
+				resets++
+			}
+			prev = current
 		}
-		prev = current
 	}
 
-	return append(enh.Out, Sample{
-		Point: Point{V: float64(resets)},
-	})
-}
+	if len(histograms) > 1 {
+		prev := histograms[0].H
+		for _, sample := range histograms[1:] {
+			current := sample.H
+			if current.DetectReset(prev) {
+				resets++
+			}
+			prev = current
+		}
+	}
 
-// === changes(Matrix parser.ValueTypeMatrix) Vector ===
-func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	samples := vals[0].(Matrix)[0]
+	return append(enh.Out, Sample{F: float64(resets)}), nil
+}
 
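The float branch of the rewritten funcResets counts every sample that drops below its predecessor; the histogram branch delegates the same question to FloatHistogram.DetectReset. A standalone sketch of the float rule only:

package main

import "fmt"

// countResets mirrors the float branch of funcResets: a reset is any sample
// that is lower than its predecessor.
func countResets(values []float64) int {
	resets := 0
	for i := 1; i < len(values); i++ {
		if values[i] < values[i-1] {
			resets++
		}
	}
	return resets
}

func main() {
	// A counter that restarts twice: after 7 and after 9.
	fmt.Println(countResets([]float64{1, 4, 7, 0, 3, 9, 2})) // 2
}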
+// === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	floats := vals[0].(Matrix)[0].Floats
 	changes := 0
-	prev := samples.Points[0].V
-	for _, sample := range samples.Points[1:] {
-		current := sample.V
+
+	if len(floats) == 0 {
+		// TODO(beorn7): Only histogram values, still need to add support.
+		return enh.Out, nil
+	}
+
+	prev := floats[0].F
+	for _, sample := range floats[1:] {
+		current := sample.F
 		if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) {
 			changes++
 		}
 		prev = current
 	}
 
-	return append(enh.Out, Sample{
-		Point: Point{V: float64(changes)},
-	})
+	return append(enh.Out, Sample{F: float64(changes)}), nil
 }
 
-// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) Vector ===
-func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) (Vector, Annotations) ===
+func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	var (
 		vector = vals[0].(Vector)
 		dst    = stringFromArg(args[1])
@@ -1080,30 +1276,31 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod
 			if len(res) > 0 {
 				lb.Set(dst, string(res))
 			}
-			outMetric = lb.Labels(labels.EmptyLabels())
+			outMetric = lb.Labels()
 			enh.Dmn[h] = outMetric
 		}
 	}
 
 		enh.Out = append(enh.Out, Sample{
 			Metric: outMetric,
-			Point:  Point{V: el.Point.V},
+			F:      el.F,
+			H:      el.H,
 		})
 	}
-	return enh.Out
+	return enh.Out, nil
 }
 
-// === Vector(s Scalar) Vector ===
-func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === Vector(s Scalar) (Vector, Annotations) ===
+func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return append(enh.Out, Sample{
 		Metric: labels.Labels{},
-		Point:  Point{V: vals[0].(Vector)[0].V},
-	})
+		F:      vals[0].(Vector)[0].F,
+	}), nil
 }
 
-// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) Vector ===
-func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) (Vector, Annotations) ===
+func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	var (
 		vector = vals[0].(Vector)
 		dst    = stringFromArg(args[1])
@@ -1148,16 +1345,17 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe
 			lb.Set(dst, strval)
 		}
 
-		outMetric = lb.Labels(labels.EmptyLabels())
+		outMetric = lb.Labels()
 		enh.Dmn[h] = outMetric
 	}
 
 		enh.Out = append(enh.Out, Sample{
 			Metric: outMetric,
-			Point:  Point{V: el.Point.V},
+			F:      el.F,
+			H:      el.H,
 		})
 	}
-	return enh.Out
+	return enh.Out, nil
 }
 
 // Common code for date related functions.
@@ -1166,74 +1364,74 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo
 		return append(enh.Out, Sample{
 			Metric: labels.Labels{},
-			Point:  Point{V: f(time.Unix(enh.Ts/1000, 0).UTC())},
+			F:      f(time.Unix(enh.Ts/1000, 0).UTC()),
 		})
 	}
 
 	for _, el := range vals[0].(Vector) {
-		t := time.Unix(int64(el.V), 0).UTC()
+		t := time.Unix(int64(el.F), 0).UTC()
 		enh.Out = append(enh.Out, Sample{
 			Metric: enh.DropMetricName(el.Metric),
-			Point:  Point{V: f(t)},
+			F:      f(t),
 		})
 	}
 	return enh.Out
 }
 
 // === days_in_month(v Vector) Scalar ===
-func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day())
-	})
+	}), nil
 }
 
 // === day_of_month(v Vector) Scalar ===
-func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.Day())
-	})
+	}), nil
 }
 
 // === day_of_week(v Vector) Scalar ===
-func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.Weekday())
-	})
+	}), nil
 }
 
 // === day_of_year(v Vector) Scalar ===
-func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.YearDay())
-	})
+	}), nil
 }
 
 // === hour(v Vector) Scalar ===
-func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.Hour())
-	})
+	}), nil
 }
 
 // === minute(v Vector) Scalar ===
-func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.Minute())
-	})
+	}), nil
 }
 
 // === month(v Vector) Scalar ===
-func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.Month())
-	})
+	}), nil
 }
 
 // === year(v Vector) Scalar ===
-func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.Year())
-	})
+	}), nil
 }
 
 // FunctionCalls is a list of all functions supported by PromQL, including their types.
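The days_in_month implementation above leans on Go's date normalization: day 32 always overflows into the next month, so 32 minus the normalized day-of-month is the month's length. A standalone sketch of the trick:

package main

import (
	"fmt"
	"time"
)

// daysInMonth reproduces the trick used by funcDaysInMonth: time.Date
// normalizes the out-of-range day 32 into the following month, so
// 32 minus the normalized day-of-month is the length of the month.
func daysInMonth(t time.Time) int {
	return 32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day()
}

func main() {
	fmt.Println(daysInMonth(time.Date(2024, time.February, 1, 0, 0, 0, 0, time.UTC))) // 29
	fmt.Println(daysInMonth(time.Date(2023, time.February, 1, 0, 0, 0, 0, time.UTC))) // 28
}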
@@ -1269,6 +1467,8 @@ var FunctionCalls = map[string]FunctionCall{
 	"histogram_fraction": funcHistogramFraction,
 	"histogram_quantile": funcHistogramQuantile,
 	"histogram_sum":      funcHistogramSum,
+	"histogram_stddev":   funcHistogramStdDev,
+	"histogram_stdvar":   funcHistogramStdVar,
 	"holt_winters":       funcHoltWinters,
 	"hour":               funcHour,
 	"idelta":             funcIdelta,
@@ -1332,10 +1532,20 @@ func (s vectorByValueHeap) Len() int {
 }
 
 func (s vectorByValueHeap) Less(i, j int) bool {
-	if math.IsNaN(s[i].V) {
+	// We compare histograms based on their sum of observations.
+	// TODO(beorn7): Is that what we want?
+	vi, vj := s[i].F, s[j].F
+	if s[i].H != nil {
+		vi = s[i].H.Sum
+	}
+	if s[j].H != nil {
+		vj = s[j].H.Sum
+	}
+
+	if math.IsNaN(vi) {
 		return true
 	}
-	return s[i].V < s[j].V
+	return vi < vj
 }
 
 func (s vectorByValueHeap) Swap(i, j int) {
@@ -1361,10 +1571,20 @@ func (s vectorByReverseValueHeap) Len() int {
 }
 
 func (s vectorByReverseValueHeap) Less(i, j int) bool {
-	if math.IsNaN(s[i].V) {
+	// We compare histograms based on their sum of observations.
+	// TODO(beorn7): Is that what we want?
+	vi, vj := s[i].F, s[j].F
+	if s[i].H != nil {
+		vi = s[i].H.Sum
+	}
+	if s[j].H != nil {
+		vj = s[j].H.Sum
+	}
+
+	if math.IsNaN(vi) {
 		return true
 	}
-	return s[i].V > s[j].V
+	return vi > vj
 }
 
 func (s vectorByReverseValueHeap) Swap(i, j int) {
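The updated Less methods sort a histogram sample by its sum of observations and keep the NaN-sorts-first convention. A standalone sketch of that comparison rule, with a simplified sample type standing in for promql.Sample:

package main

import (
	"fmt"
	"math"
	"sort"
)

// sample is a simplified stand-in for promql.Sample: f is the float value;
// hSum, when non-nil, is a histogram's sum of observations.
type sample struct {
	f    float64
	hSum *float64
}

func key(s sample) float64 {
	if s.hSum != nil {
		return *s.hSum // histograms are compared by their sum of observations
	}
	return s.f
}

func main() {
	ten := 10.0
	v := []sample{{f: 3}, {hSum: &ten}, {f: math.NaN()}, {f: 7}}
	sort.Slice(v, func(i, j int) bool {
		vi, vj := key(v[i]), key(v[j])
		if math.IsNaN(vi) {
			return true // NaN sorts first, as in vectorByValueHeap.Less
		}
		return vi < vj
	})
	for _, s := range v {
		fmt.Println(key(s)) // NaN, 3, 7, 10
	}
}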
@@ -1414,7 +1634,7 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels {
 		}
 	}
 
-	return b.Labels(labels.EmptyLabels())
+	return b.Labels()
 }
 
 func stringFromArg(e parser.Expr) string {
diff --git a/vendor/github.com/prometheus/prometheus/promql/fuzz.go b/vendor/github.com/prometheus/prometheus/promql/fuzz.go
index 39933378..aff6eb15 100644
--- a/vendor/github.com/prometheus/prometheus/promql/fuzz.go
+++ b/vendor/github.com/prometheus/prometheus/promql/fuzz.go
@@ -58,7 +58,7 @@ const (
 )
 
 func fuzzParseMetricWithContentType(in []byte, contentType string) int {
-	p, warning := textparse.New(in, contentType)
+	p, warning := textparse.New(in, contentType, false)
 	if warning != nil {
 		// An invalid content type is being passed, which should not happen
 		// in this context.
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
index 190af2d5..58136266 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
@@ -20,6 +20,8 @@ import (
 
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
+
+	"github.com/prometheus/prometheus/promql/parser/posrange"
 )
 
 // Node is a generic interface for all nodes in an AST.
@@ -45,7 +47,7 @@ type Node interface {
 	Pretty(level int) string
 
 	// PositionRange returns the position of the AST Node in the query string.
-	PositionRange() PositionRange
+	PositionRange() posrange.PositionRange
 }
 
 // Statement is a generic interface for all statements.
@@ -94,7 +96,7 @@ type AggregateExpr struct {
 	Param    Expr     // Parameter used by some aggregators.
 	Grouping []string // The labels by which to group the Vector.
 	Without  bool     // Whether to drop the given labels rather than keep them.
-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }
 
 // BinaryExpr represents a binary expression between two child expressions.
@@ -115,7 +117,7 @@ type Call struct {
 	Func *Function   // The function that was called.
 	Args Expressions // Arguments used in the call.
 
-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }
 
 // MatrixSelector represents a Matrix selection.
@@ -125,7 +127,7 @@ type MatrixSelector struct {
 	VectorSelector Expr
 	Range          time.Duration
 
-	EndPos Pos
+	EndPos posrange.Pos
 }
 
 // SubqueryExpr represents a subquery.
@@ -143,27 +145,27 @@ type SubqueryExpr struct {
 	StartOrEnd ItemType // Set when @ is used with start() or end()
 	Step       time.Duration
 
-	EndPos Pos
+	EndPos posrange.Pos
 }
 
 // NumberLiteral represents a number.
 type NumberLiteral struct {
 	Val float64
 
-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }
 
 // ParenExpr wraps an expression so it cannot be disassembled as a consequence
 // of operator precedence.
 type ParenExpr struct {
 	Expr Expr
 
-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }
 
 // StringLiteral represents a string.
 type StringLiteral struct {
 	Val string
 
-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }
 
 // UnaryExpr represents a unary operation on another expression.
@@ -172,7 +174,7 @@ type UnaryExpr struct {
 	Op   ItemType
 	Expr Expr
 
-	StartPos Pos
+	StartPos posrange.Pos
 }
 
 // StepInvariantExpr represents a query which evaluates to the same result
@@ -184,7 +186,9 @@ type StepInvariantExpr struct {
 
 func (e *StepInvariantExpr) String() string { return e.Expr.String() }
 
-func (e *StepInvariantExpr) PositionRange() PositionRange { return e.Expr.PositionRange() }
+func (e *StepInvariantExpr) PositionRange() posrange.PositionRange {
+	return e.Expr.PositionRange()
+}
 
 // VectorSelector represents a Vector selection.
 type VectorSelector struct {
@@ -204,7 +208,7 @@ type VectorSelector struct {
 	UnexpandedSeriesSet storage.SeriesSet
 	Series              []storage.Series
 
-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }
 
 // TestStmt is an internal helper statement that allows execution
@@ -215,8 +219,8 @@ func (TestStmt) String() string { return "test statement" }
 func (TestStmt) PromQLStmt() {}
 func (t TestStmt) Pretty(int) string { return t.String() }
 
-func (TestStmt) PositionRange() PositionRange {
-	return PositionRange{
+func (TestStmt) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: -1,
 		End:   -1,
 	}
@@ -349,7 +353,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
 // for all the non-nil children of node, recursively.
 func Inspect(node Node, f inspector) {
 	//nolint: errcheck
-	Walk(inspector(f), node, nil)
+	Walk(f, node, nil)
 }
 
 // Children returns a list of all child nodes of a syntax tree node.
@@ -368,13 +372,14 @@ func Children(node Node) []Node {
 	case *AggregateExpr:
 		// While this does not look nice, it should avoid unnecessary allocations
 		// caused by slice resizing
-		if n.Expr == nil && n.Param == nil {
+		switch {
+		case n.Expr == nil && n.Param == nil:
 			return nil
-		} else if n.Expr == nil {
+		case n.Expr == nil:
 			return []Node{n.Param}
-		} else if n.Param == nil {
+		case n.Param == nil:
 			return []Node{n.Expr}
-		} else {
+		default:
 			return []Node{n.Expr, n.Param}
 		}
 	case *BinaryExpr:
@@ -404,17 +409,11 @@ func Children(node Node) []Node {
 	}
 }
 
-// PositionRange describes a position in the input string of the parser.
-type PositionRange struct {
-	Start Pos
-	End   Pos
-}
-
 // mergeRanges is a helper function to merge the PositionRanges of two Nodes.
 // Note that the arguments must be in the same order as they
 // occur in the input string.
-func mergeRanges(first, last Node) PositionRange {
-	return PositionRange{
+func mergeRanges(first, last Node) posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: first.PositionRange().Start,
 		End:   last.PositionRange().End,
 	}
@@ -422,33 +421,33 @@ func mergeRanges(first, last Node) PositionRange {
 
 // Item implements the Node interface.
 // This makes it possible to call mergeRanges on them.
-func (i *Item) PositionRange() PositionRange {
-	return PositionRange{
+func (i *Item) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: i.Pos,
-		End:   i.Pos + Pos(len(i.Val)),
+		End:   i.Pos + posrange.Pos(len(i.Val)),
 	}
 }
 
-func (e *AggregateExpr) PositionRange() PositionRange {
+func (e *AggregateExpr) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
 
-func (e *BinaryExpr) PositionRange() PositionRange {
+func (e *BinaryExpr) PositionRange() posrange.PositionRange {
 	return mergeRanges(e.LHS, e.RHS)
 }
 
-func (e *Call) PositionRange() PositionRange {
+func (e *Call) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
 
-func (e *EvalStmt) PositionRange() PositionRange {
+func (e *EvalStmt) PositionRange() posrange.PositionRange {
 	return e.Expr.PositionRange()
 }
 
-func (e Expressions) PositionRange() PositionRange {
+func (e Expressions) PositionRange() posrange.PositionRange {
 	if len(e) == 0 {
 		// Position undefined.
-		return PositionRange{
+		return posrange.PositionRange{
 			Start: -1,
 			End:   -1,
 		}
@@ -456,39 +455,39 @@ func (e Expressions) PositionRange() PositionRange {
 	return mergeRanges(e[0], e[len(e)-1])
 }
 
-func (e *MatrixSelector) PositionRange() PositionRange {
-	return PositionRange{
+func (e *MatrixSelector) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: e.VectorSelector.PositionRange().Start,
 		End:   e.EndPos,
 	}
 }
 
-func (e *SubqueryExpr) PositionRange() PositionRange {
-	return PositionRange{
+func (e *SubqueryExpr) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: e.Expr.PositionRange().Start,
 		End:   e.EndPos,
 	}
 }
 
-func (e *NumberLiteral) PositionRange() PositionRange {
+func (e *NumberLiteral) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
 
-func (e *ParenExpr) PositionRange() PositionRange {
+func (e *ParenExpr) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
 
-func (e *StringLiteral) PositionRange() PositionRange {
+func (e *StringLiteral) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
 
-func (e *UnaryExpr) PositionRange() PositionRange {
-	return PositionRange{
+func (e *UnaryExpr) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: e.StartPos,
 		End:   e.Expr.PositionRange().End,
 	}
 }
 
-func (e *VectorSelector) PositionRange() PositionRange {
+func (e *VectorSelector) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
index 45002132..45a30219 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
@@ -173,6 +173,16 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeVector},
 		ReturnType: ValueTypeVector,
 	},
+	"histogram_stddev": {
+		Name:       "histogram_stddev",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"histogram_stdvar": {
+		Name:       "histogram_stdvar",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
 	"histogram_fraction": {
 		Name:       "histogram_fraction",
 		ArgTypes:   []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector},
@@ -387,7 +397,7 @@ var Functions = map[string]*Function{
 }
 
 // getFunction returns a predefined Function object for the given name.
-func getFunction(name string) (*Function, bool) {
-	function, ok := Functions[name]
+func getFunction(name string, functions map[string]*Function) (*Function, bool) {
+	function, ok := functions[name]
 	return function, ok
 }
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
index 461e854a..676fd9fb 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
@@ -21,23 +21,29 @@ import (
 
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/value"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/promql/parser/posrange"
 )
 %}
 
 %union {
-	node      Node
-	item      Item
-	matchers  []*labels.Matcher
-	matcher   *labels.Matcher
-	label     labels.Label
-	labels    labels.Labels
-	lblList   []labels.Label
-	strings   []string
-	series    []SequenceValue
-	uint      uint64
-	float     float64
-	duration  time.Duration
+	node        Node
+	item        Item
+	matchers    []*labels.Matcher
+	matcher     *labels.Matcher
+	label       labels.Label
+	labels      labels.Labels
+	lblList     []labels.Label
+	strings     []string
+	series      []SequenceValue
+	histogram   *histogram.FloatHistogram
+	descriptors map[string]interface{}
+	bucket_set  []float64
+	int         int64
+	uint        uint64
+	float       float64
+	duration    time.Duration
 }
 
@@ -54,6 +60,8 @@ IDENTIFIER
 LEFT_BRACE
 LEFT_BRACKET
 LEFT_PAREN
+OPEN_HIST
+CLOSE_HIST
 METRIC_IDENTIFIER
 NUMBER
 RIGHT_BRACE
@@ -64,6 +72,20 @@ SPACE
 STRING
 TIMES
 
+// Histogram Descriptors.
+%token histogramDescStart
+%token
+SUM_DESC
+COUNT_DESC
+SCHEMA_DESC
+OFFSET_DESC
+NEGATIVE_OFFSET_DESC
+BUCKETS_DESC
+NEGATIVE_BUCKETS_DESC
+ZERO_BUCKET_DESC
+ZERO_BUCKET_WIDTH_DESC
+%token histogramDescEnd
+
 // Operators.
 %token	operatorsStart
 %token
@@ -145,6 +167,10 @@ START_METRIC_SELECTOR
 %type