From 63d8f08b4ec8b0cf01bbd2e6f60c188afbcd9db5 Mon Sep 17 00:00:00 2001
From: Sreeram Narayanan
Date: Thu, 28 Aug 2025 13:53:01 +0530
Subject: [PATCH 1/3] fix: golangci-lint staticcheck errors

---
 agent/agent.go | 12 +--
 agent/agent_test.go | 16 +--
 agent/auto-config/auto_config.go | 2 +-
 agent/auto-config/auto_config_test.go | 18 ++--
 agent/auto-config/auto_encrypt_test.go | 4 +-
 agent/auto-config/mock_test.go | 9 +-
 agent/auto-config/run.go | 2 +-
 agent/auto-config/tls.go | 4 +-
 agent/catalog_endpoint.go | 30 +++---
 agent/checks/alias_test.go | 2 +-
 agent/checks/check_test.go | 8 +-
 agent/connect/ca/provider_aws.go | 2 +-
 agent/connect/ca/provider_consul_test.go | 2 +-
 agent/connect/ca/provider_vault.go | 9 +-
 agent/connect/parsing.go | 2 +-
 agent/discovery_chain_endpoint.go | 6 +-
 agent/dns.go | 31 +++---
 .../property-override/structpatcher.go | 2 +-
 agent/grpc-external/limiter/limiter.go | 5 +-
 .../dataplane/get_envoy_bootstrap_params.go | 4 +-
 .../get_envoy_bootstrap_params_test.go | 4 +-
 .../services/peerstream/subscription_view.go | 4 +-
 .../peerstream/subscription_view_test.go | 8 +-
 .../services/resource/delete_test.go | 2 +-
 .../services/resource/list_test.go | 2 +-
 .../services/resource/testing/builder.go | 8 +-
 .../services/resource/write_status_test.go | 2 +-
 agent/grpc-internal/client_test.go | 4 +-
 agent/grpc-internal/resolver/resolver_test.go | 4 +-
 agent/hcp/telemetry/otel_exporter_test.go | 2 +-
 agent/hcp/telemetry/otel_sink_test.go | 8 +-
 agent/hcp/telemetry/otlp_transform_test.go | 2 +-
 agent/hcp/telemetry_provider_test.go | 2 +-
 agent/hcp/testing.go | 2 +-
 agent/health_endpoint.go | 16 +--
 agent/http.go | 6 +-
 agent/http_test.go | 4 +-
 agent/intentions_endpoint.go | 6 +-
 agent/leafcert/leafcert_test_helpers.go | 4 +-
 agent/leafcert/structs.go | 2 +-
 agent/local/state.go | 2 +-
 agent/operator_endpoint.go | 6 +-
 agent/pool/pool.go | 2 +-
 agent/prepared_query_endpoint.go | 20 ++--
 agent/proxycfg-glue/config_entry.go | 2 +-
 agent/proxycfg-glue/discovery_chain_test.go | 2 +-
 agent/proxycfg-glue/health_blocking.go | 4 +-
 agent/proxycfg-glue/health_test.go | 2 +-
 agent/proxycfg-glue/intentions.go | 2 +-
 .../internal_service_dump_test.go | 2 +-
 .../resolved_service_config_test.go | 2 +-
 .../proxycfg-glue/service_http_checks_test.go | 2 +-
 agent/proxycfg-glue/service_list.go | 2 +-
 agent/proxycfg-glue/service_list_test.go | 2 +-
 .../proxycfg-sources/catalog/config_source.go | 2 +-
 .../catalog/config_source_test.go | 2 +-
 agent/proxycfg/api_gateway.go | 6 +-
 agent/proxycfg/config_snapshot_glue.go | 4 +-
 agent/proxycfg/ingress_gateway.go | 8 +-
 agent/proxycfg/mesh_gateway.go | 4 +-
 agent/proxycfg/naming.go | 2 +-
 agent/proxycfg/naming_test.go | 4 +-
 agent/proxycfg/snapshot.go | 2 +-
 agent/proxycfg/state.go | 8 +-
 agent/proxycfg/state_test.go | 18 ++--
 agent/proxycfg/terminating_gateway.go | 8 +-
 agent/proxycfg/testing_mesh_gateway.go | 4 +-
 agent/remote_exec.go | 4 +-
 agent/rpc/middleware/interceptors_test.go | 2 +-
 agent/rpc/peering/service.go | 18 ++--
 agent/rpc/peering/service_test.go | 4 +-
 agent/rpcclient/configentry/configentry.go | 8 +-
 agent/rpcclient/configentry/view_test.go | 4 +-
 agent/rpcclient/health/health.go | 6 +-
 agent/rpcclient/health/view.go | 4 +-
 agent/rpcclient/health/view_test.go | 60 +++++------
 agent/service_manager.go | 8 +-
 agent/structs/acl.go | 6 +-
 agent/structs/acl_templated_policy.go | 2 +-
 agent/structs/aclfilter/filter.go | 46 ++++----
 agent/structs/aclfilter/filter_test.go | 100 +++++++++---------
 agent/structs/config_entry.go | 12 +--
agent/structs/config_entry_discoverychain.go | 14 +-- agent/structs/config_entry_gateways.go | 24 ++--- .../config_entry_inline_certificate.go | 4 +- agent/structs/config_entry_intentions.go | 12 +-- agent/structs/config_entry_routes.go | 16 +-- agent/structs/connect_ca.go | 8 +- agent/structs/connect_proxy_config.go | 6 +- agent/structs/intention.go | 2 +- agent/structs/service_definition.go | 6 +- agent/structs/structs.go | 22 ++-- agent/structs/structs_test.go | 4 +- agent/testagent.go | 20 ++-- agent/ui_endpoint_test.go | 4 +- agent/util.go | 6 +- agent/xds/clusters.go | 7 +- agent/xds/delta_envoy_extender_ce_test.go | 5 +- agent/xds/delta_test.go | 4 +- agent/xds/listeners.go | 5 +- agent/xds/testing.go | 2 +- agent/xds/xds_protocol_helpers_test.go | 2 +- command/agent/agent_test.go | 4 +- .../catalog/list/nodes/catalog_list_nodes.go | 2 +- 104 files changed, 412 insertions(+), 437 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 6dff34b53675..de7e5ac58389 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2374,7 +2374,7 @@ func (a *Agent) addServiceLocked(req addServiceLockedRequest) error { } } - req.Service.EnterpriseMeta.Normalize() + req.Service.Normalize() if err := a.validateService(req.Service, req.chkTypes); err != nil { return err @@ -2855,7 +2855,7 @@ func (a *Agent) AddCheck(check *structs.HealthCheck, chkType *structs.CheckType, func (a *Agent) addCheckLocked(check *structs.HealthCheck, chkType *structs.CheckType, persist bool, token string, source configSource) error { var service *structs.NodeService - check.EnterpriseMeta.Normalize() + check.Normalize() if check.ServiceID != "" { cid := check.CompoundServiceID() @@ -3630,7 +3630,7 @@ func (a *Agent) storePid() error { // Write out the PID pid := os.Getpid() - _, err = pidFile.WriteString(fmt.Sprintf("%d", pid)) + _, err = fmt.Fprintf(pidFile, "%d", pid) if err != nil { return fmt.Errorf("Could not write to pid file: %s", err) } @@ -3674,8 +3674,8 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI // Register the services from config for _, service := range conf.Services { // Default service partition to the same as agent - if service.EnterpriseMeta.PartitionOrEmpty() == "" { - service.EnterpriseMeta.OverridePartition(a.AgentEnterpriseMeta().PartitionOrDefault()) + if service.PartitionOrEmpty() == "" { + service.OverridePartition(a.AgentEnterpriseMeta().PartitionOrDefault()) } ns := service.NodeService() @@ -3801,7 +3801,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI } else if !acl.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.Service.PartitionOrDefault()) { a.logger.Info("Purging service file in wrong partition", "file", file, - "partition", p.Service.EnterpriseMeta.PartitionOrDefault(), + "partition", p.Service.PartitionOrDefault(), ) if err := os.Remove(file); err != nil { a.logger.Error("Failed purging service file", diff --git a/agent/agent_test.go b/agent/agent_test.go index 316adeb3ddf1..159bbd10d5e8 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -797,7 +797,7 @@ func test_createAlias(t *testing.T, agent *TestAgent, chk *structs.CheckType, ex found = true assert.Equal(t, expectedResult, c.Check.Status, "Check state should be %s, was %s in %#v", expectedResult, c.Check.Status, c.Check) srvID := structs.NewServiceID(srv.ID, structs.WildcardEnterpriseMetaInDefaultPartition()) - if err := agent.Agent.State.RemoveService(srvID); err != nil { + if err := agent.State.RemoveService(srvID); err != nil 
{ fmt.Println("[DEBUG] Fail to remove service", srvID, ", err:=", err) } fmt.Println("[DEBUG] Service Removed", srvID, ", err:=", err) @@ -2273,7 +2273,7 @@ func TestAgent_HTTPCheck_EnableAgentTLSForChecks(t *testing.T) { Status: api.HealthCritical, } - addr, err := firstAddr(a.Agent.apiServers, "https") + addr, err := firstAddr(a.apiServers, "https") require.NoError(t, err) url := fmt.Sprintf("https://%s/v1/agent/self", addr.String()) chk := &structs.CheckType{ @@ -5378,7 +5378,7 @@ func TestAutoConfig_Integration(t *testing.T) { defer client.Shutdown() retry.Run(t, func(r *retry.R) { - require.NotNil(r, client.Agent.tlsConfigurator.Cert()) + require.NotNil(r, client.tlsConfigurator.Cert()) }) // when this is successful we managed to get the gossip key and serf addresses to bind to @@ -5390,7 +5390,7 @@ func TestAutoConfig_Integration(t *testing.T) { require.NotEmpty(t, client.tokens.AgentToken()) // grab the existing cert - cert1 := client.Agent.tlsConfigurator.Cert() + cert1 := client.tlsConfigurator.Cert() require.NotNil(t, cert1) // force a roots rotation by updating the CA config @@ -5414,7 +5414,7 @@ func TestAutoConfig_Integration(t *testing.T) { // ensure that a new cert gets generated and pushed into the TLS configurator retry.Run(t, func(r *retry.R) { - require.NotEqual(r, cert1, client.Agent.tlsConfigurator.Cert()) + require.NotEqual(r, cert1, client.tlsConfigurator.Cert()) // check that the on disk certs match expectations data, err := os.ReadFile(filepath.Join(client.DataDir, "auto-config.json")) @@ -5428,7 +5428,7 @@ func TestAutoConfig_Integration(t *testing.T) { actual, err := tls.X509KeyPair([]byte(resp.Certificate.CertPEM), []byte(resp.Certificate.PrivateKeyPEM)) require.NoError(r, err) - require.Equal(r, client.Agent.tlsConfigurator.Cert(), &actual) + require.Equal(r, client.tlsConfigurator.Cert(), &actual) }) } @@ -5527,7 +5527,7 @@ func TestSharedRPCRouter(t *testing.T) { testrpc.WaitForTestAgent(t, srv.RPC, "dc1") - mgr, server := srv.Agent.baseDeps.Router.FindLANRoute() + mgr, server := srv.baseDeps.Router.FindLANRoute() require.NotNil(t, mgr) require.NotNil(t, server) @@ -5539,7 +5539,7 @@ func TestSharedRPCRouter(t *testing.T) { testrpc.WaitForTestAgent(t, client.RPC, "dc1") - mgr, server = client.Agent.baseDeps.Router.FindLANRoute() + mgr, server = client.baseDeps.Router.FindLANRoute() require.NotNil(t, mgr) require.NotNil(t, server) } diff --git a/agent/auto-config/auto_config.go b/agent/auto-config/auto_config.go index a1a5848f623f..6fd5c26a0c1c 100644 --- a/agent/auto-config/auto_config.go +++ b/agent/auto-config/auto_config.go @@ -95,7 +95,7 @@ func New(config Config) (*AutoConfig, error) { } } - if err := config.EnterpriseConfig.validateAndFinalize(); err != nil { + if err := config.validateAndFinalize(); err != nil { return nil, err } diff --git a/agent/auto-config/auto_config_test.go b/agent/auto-config/auto_config_test.go index 7c5c629be2f7..596208eb6f82 100644 --- a/agent/auto-config/auto_config_test.go +++ b/agent/auto-config/auto_config_test.go @@ -254,7 +254,7 @@ func TestInitialConfiguration_cancelled(t *testing.T) { } verify_outgoing = true `) - mcfg.Config.Loader = loader.Load + mcfg.Loader = loader.Load expectedRequest := pbautoconf.AutoConfigRequest{ Datacenter: "dc1", @@ -290,7 +290,7 @@ func TestInitialConfiguration_restored(t *testing.T) { verify_outgoing = true `) - mcfg.Config.Loader = loader.Load + mcfg.Loader = loader.Load indexedRoots, cert, extraCACerts := mcfg.setupInitialTLS(t, "autoconf", "dc1", "secret") @@ -344,7 +344,7 @@ func 
TestInitialConfiguration_success(t *testing.T) { } verify_outgoing = true `) - mcfg.Config.Loader = loader.Load + mcfg.Loader = loader.Load indexedRoots, cert, extraCerts := mcfg.setupInitialTLS(t, "autoconf", "dc1", "secret") @@ -423,10 +423,10 @@ func TestInitialConfiguration_retries(t *testing.T) { } verify_outgoing = true `) - mcfg.Config.Loader = loader.Load + mcfg.Loader = loader.Load // reduce the retry wait times to make this test run faster - mcfg.Config.Waiter = &retry.Waiter{MinFailures: 2, MaxWait: time.Millisecond} + mcfg.Waiter = &retry.Waiter{MinFailures: 2, MaxWait: time.Millisecond} indexedRoots, cert, extraCerts := mcfg.setupInitialTLS(t, "autoconf", "dc1", "secret") @@ -535,7 +535,7 @@ func TestGoRoutineManagement(t *testing.T) { } verify_outgoing = true `) - mcfg.Config.Loader = loader.Load + mcfg.Loader = loader.Load // prepopulation is going to grab the token to populate the correct cache key mcfg.tokens.On("AgentToken").Return("secret").Times(0) @@ -604,7 +604,7 @@ func TestGoRoutineManagement(t *testing.T) { waitForContexts := func() bool { ctxLock.Lock() defer ctxLock.Unlock() - return !(rootsCtx == nil || leafCtx == nil) + return rootsCtx != nil && leafCtx != nil } // wait for the cache notifications to get started @@ -676,8 +676,8 @@ func startedAutoConfig(t *testing.T, autoEncrypt bool) testAutoConfig { verify_outgoing = true `) } - mcfg.Config.Loader = loader.Load - mcfg.Config.FallbackLeeway = time.Nanosecond + mcfg.Loader = loader.Load + mcfg.FallbackLeeway = time.Nanosecond originalToken := "a5deaa25-11ca-48bf-a979-4c3a7aa4b9a9" diff --git a/agent/auto-config/auto_encrypt_test.go b/agent/auto-config/auto_encrypt_test.go index d0768080248c..09eef3bb2537 100644 --- a/agent/auto-config/auto_encrypt_test.go +++ b/agent/auto-config/auto_encrypt_test.go @@ -280,7 +280,7 @@ func TestAutoEncrypt_InitialCerts(t *testing.T) { resp.VerifyServerHostname = true }) - mcfg.Config.Waiter = &retry.Waiter{MinFailures: 2, MaxWait: time.Millisecond} + mcfg.Waiter = &retry.Waiter{MinFailures: 2, MaxWait: time.Millisecond} ac := AutoConfig{ config: &config.RuntimeConfig{ @@ -320,7 +320,7 @@ func TestAutoEncrypt_InitialConfiguration(t *testing.T) { } `) loader.opts.FlagValues.NodeName = &nodeName - mcfg.Config.Loader = loader.Load + mcfg.Loader = loader.Load indexedRoots, cert, extraCerts := mcfg.setupInitialTLS(t, nodeName, datacenter, token) diff --git a/agent/auto-config/mock_test.go b/agent/auto-config/mock_test.go index 0ef5084af264..2d39c9b608ef 100644 --- a/agent/auto-config/mock_test.go +++ b/agent/auto-config/mock_test.go @@ -35,19 +35,20 @@ func newMockDirectRPC(t *testing.T) *mockDirectRPC { func (m *mockDirectRPC) RPC(dc string, node string, addr net.Addr, method string, args interface{}, reply interface{}) error { var retValues mock.Arguments - if method == "AutoConfig.InitialConfiguration" { + switch method { + case "AutoConfig.InitialConfiguration": req := args.(*pbautoconf.AutoConfigRequest) csr := req.CSR req.CSR = "" retValues = m.Called(dc, node, addr, method, args, reply) req.CSR = csr - } else if method == "AutoEncrypt.Sign" { + case "AutoEncrypt.Sign": req := args.(*structs.CASignRequest) csr := req.CSR req.CSR = "" retValues = m.Called(dc, node, addr, method, args, reply) req.CSR = csr - } else { + default: retValues = m.Called(dc, node, addr, method, args, reply) } @@ -383,7 +384,7 @@ func (m *mockedConfig) expectInitialTLS(t *testing.T, agentName, datacenter, tok true, ).Return(nil).Once() - rootRes := cache.FetchResult{Value: indexedRoots, Index: 
indexedRoots.QueryMeta.Index} + rootRes := cache.FetchResult{Value: indexedRoots, Index: indexedRoots.Index} rootsReq := structs.DCSpecificRequest{Datacenter: datacenter} // we should prepopulate the cache with the CA roots diff --git a/agent/auto-config/run.go b/agent/auto-config/run.go index ed3389c1880c..129121395b81 100644 --- a/agent/auto-config/run.go +++ b/agent/auto-config/run.go @@ -129,7 +129,7 @@ func (ac *AutoConfig) run(ctx context.Context, exit chan struct{}) { return -1 } expiry := cert.NotAfter.Add(ac.acConfig.FallbackLeeway) - return expiry.Sub(time.Now()) + return time.Until(expiry) } fallbackTimer := time.NewTimer(calcFallbackInterval()) diff --git a/agent/auto-config/tls.go b/agent/auto-config/tls.go index dd2d6f9e25e1..e31d632d297b 100644 --- a/agent/auto-config/tls.go +++ b/agent/auto-config/tls.go @@ -97,7 +97,7 @@ func (ac *AutoConfig) populateCertificateCache(certs *structs.SignedResponse) er } // prepolutate roots cache - rootRes := cache.FetchResult{Value: &certs.ConnectCARoots, Index: certs.ConnectCARoots.QueryMeta.Index} + rootRes := cache.FetchResult{Value: &certs.ConnectCARoots, Index: certs.ConnectCARoots.Index} rootsReq := ac.caRootsRequest() // getting the roots doesn't require a token so in order to potentially share the cache with another if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCARootName, rootRes, ac.config.Datacenter, structs.DefaultPeerKeyword, "", rootsReq.CacheInfo().Key); err != nil { @@ -110,7 +110,7 @@ func (ac *AutoConfig) populateCertificateCache(certs *structs.SignedResponse) er err = ac.acConfig.LeafCertManager.Prepopulate( context.Background(), leafReq.Key(), - certs.IssuedCert.RaftIndex.ModifyIndex, + certs.IssuedCert.ModifyIndex, &certs.IssuedCert, connect.EncodeSigningKeyID(cert.AuthorityKeyId), ) diff --git a/agent/catalog_endpoint.go b/agent/catalog_endpoint.go index 4847f052c8ab..0d660e88fd72 100644 --- a/agent/catalog_endpoint.go +++ b/agent/catalog_endpoint.go @@ -201,7 +201,7 @@ func (s *HTTPHandlers) CatalogDatacenters(resp http.ResponseWriter, req *http.Re parseCacheControl(resp, req, &args.QueryOptions) var out []string - if args.QueryOptions.UseCache { + if args.UseCache { raw, m, err := s.agent.cache.Get(req.Context(), cachetype.CatalogDatacentersName, &args) if err != nil { metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_datacenters"}, 1, @@ -251,12 +251,12 @@ RETRY_ONCE: if err := s.agent.RPC(req.Context(), "Catalog.ListNodes", &args, &out); err != nil { return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } - out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + out.ConsistencyLevel = args.ConsistencyLevel() s.agent.TranslateAddresses(args.Datacenter, out.Nodes, dnsutil.TranslateAddressAcceptAny) @@ -285,7 +285,7 @@ func (s *HTTPHandlers) CatalogServices(resp http.ResponseWriter, req *http.Reque var out structs.IndexedServices defer setMeta(resp, &out.QueryMeta) - if args.QueryOptions.UseCache { + if args.UseCache { raw, m, err := s.agent.cache.Get(req.Context(), cachetype.CatalogListServicesName, &args) if err != nil { metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_services"}, 1, @@ -306,14 +306,14 @@ func (s *HTTPHandlers) CatalogServices(resp http.ResponseWriter, req *http.Reque s.nodeMetricsLabels()) return nil, 
err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } } - out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + out.ConsistencyLevel = args.ConsistencyLevel() // Use empty map instead of nil if out.Services == nil { @@ -377,7 +377,7 @@ func (s *HTTPHandlers) catalogServiceNodes(resp http.ResponseWriter, req *http.R var out structs.IndexedServiceNodes defer setMeta(resp, &out.QueryMeta) - if args.QueryOptions.UseCache { + if args.UseCache { raw, m, err := s.agent.cache.Get(req.Context(), cachetype.CatalogServicesName, &args) if err != nil { metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_service_nodes"}, 1, @@ -398,14 +398,14 @@ func (s *HTTPHandlers) catalogServiceNodes(resp http.ResponseWriter, req *http.R s.nodeMetricsLabels()) return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } } - out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + out.ConsistencyLevel = args.ConsistencyLevel() s.agent.TranslateAddresses(args.Datacenter, out.ServiceNodes, dnsutil.TranslateAddressAcceptAny) // Use empty list instead of nil @@ -453,12 +453,12 @@ RETRY_ONCE: s.nodeMetricsLabels()) return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } - out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + out.ConsistencyLevel = args.ConsistencyLevel() if out.NodeServices != nil { s.agent.TranslateAddresses(args.Datacenter, out.NodeServices, dnsutil.TranslateAddressAcceptAny) } @@ -518,12 +518,12 @@ RETRY_ONCE: s.nodeMetricsLabels()) return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } - out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + out.ConsistencyLevel = args.ConsistencyLevel() s.agent.TranslateAddresses(args.Datacenter, &out.NodeServices, dnsutil.TranslateAddressAcceptAny) // Use empty list instead of nil @@ -565,12 +565,12 @@ RETRY_ONCE: s.nodeMetricsLabels()) return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } - out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + out.ConsistencyLevel = args.ConsistencyLevel() metrics.IncrCounterWithLabels([]string{"client", "api", "success", "catalog_gateway_services"}, 1, s.nodeMetricsLabels()) diff --git a/agent/checks/alias_test.go b/agent/checks/alias_test.go index 1f5662019929..a1f6b179435b 100644 --- a/agent/checks/alias_test.go +++ b/agent/checks/alias_test.go @@ -592,7 +592,7 @@ func TestCheckAlias_localInitialStatus(t *testing.T) { notify := 
newMockAliasNotify() // We fake a local service web to ensure check if passing works - notify.Notify.AddServiceID(structs.ServiceID{ID: "web"}) + notify.AddServiceID(structs.ServiceID{ID: "web"}) chkID := structs.NewCheckID(types.CheckID("foo"), nil) rpc := &mockRPC{} chk := &CheckAlias{ diff --git a/agent/checks/check_test.go b/agent/checks/check_test.go index 9a45be23067a..e66673ad1f27 100644 --- a/agent/checks/check_test.go +++ b/agent/checks/check_test.go @@ -475,7 +475,7 @@ func TestCheckHTTP_DisableRedirects(t *testing.T) { })) defer server1.Close() - server2 := httptest.NewServer(http.RedirectHandler(server1.URL, 301)) + server2 := httptest.NewServer(http.RedirectHandler(server1.URL, http.StatusMovedPermanently)) defer server2.Close() notif := mock.NewNotify() @@ -1129,11 +1129,7 @@ func TestStatusHandlerMaintainWarningStatusWhenCheckIsFlapping(t *testing.T) { func TestCheckTCPCritical(t *testing.T) { t.Parallel() - var ( - tcpServer net.Listener - ) - - tcpServer = mockTCPServer(`tcp`) + var tcpServer net.Listener = mockTCPServer(`tcp`) expectTCPStatus(t, `127.0.0.1:0`, api.HealthCritical) tcpServer.Close() } diff --git a/agent/connect/ca/provider_aws.go b/agent/connect/ca/provider_aws.go index 1ce5a5eba57d..b4c657e030c4 100644 --- a/agent/connect/ca/provider_aws.go +++ b/agent/connect/ca/provider_aws.go @@ -703,7 +703,7 @@ func ParseAWSCAConfig(raw map[string]interface{}) (*structs.AWSCAProviderConfig, return nil, fmt.Errorf("error decoding config: %s", err) } - if err := config.CommonCAProviderConfig.Validate(); err != nil { + if err := config.Validate(); err != nil { return nil, err } diff --git a/agent/connect/ca/provider_consul_test.go b/agent/connect/ca/provider_consul_test.go index 658a97d39bd9..50da93e11a2d 100644 --- a/agent/connect/ca/provider_consul_test.go +++ b/agent/connect/ca/provider_consul_test.go @@ -44,7 +44,7 @@ func newMockDelegate(t *testing.T, conf *structs.CAConfiguration) *consulCAMockD if s == nil { t.Fatalf("missing state store") } - if err := s.CASetConfig(conf.RaftIndex.CreateIndex, conf); err != nil { + if err := s.CASetConfig(conf.CreateIndex, conf); err != nil { t.Fatalf("err: %s", err) } diff --git a/agent/connect/ca/provider_vault.go b/agent/connect/ca/provider_vault.go index 692b9a568c23..383f2cbb3722 100644 --- a/agent/connect/ca/provider_vault.go +++ b/agent/connect/ca/provider_vault.go @@ -390,7 +390,8 @@ func (v *VaultProvider) setupIntermediatePKIPath() error { _, err := v.getCA(v.config.IntermediatePKINamespace, v.config.IntermediatePKIPath) if err != nil { - if err == ErrBackendNotMounted { + switch err { + case ErrBackendNotMounted: err := v.mountNamespaced(v.config.IntermediatePKINamespace, v.config.IntermediatePKIPath, &vaultapi.MountInput{ Type: "pki", Description: "intermediate CA backend for Consul Connect", @@ -403,11 +404,11 @@ func (v *VaultProvider) setupIntermediatePKIPath() error { // if the VaultProvider is ever reconfigured. v.isConsulMountedIntermediate = true - } else if err == ErrBackendNotInitialized { + case ErrBackendNotInitialized: // If this is the first time calling setupIntermediatePKIPath, the backend // will not have been initialized. Since the mount is ready we can suppress // this error. 
- } else { + default: return fmt.Errorf("unexpected error while fetching intermediate CA: %w", err) } } else { @@ -986,7 +987,7 @@ func ParseVaultCAConfig(raw map[string]interface{}, isPrimary bool) (*structs.Va config.IntermediatePKIPath += "/" } - if err := config.CommonCAProviderConfig.Validate(); err != nil { + if err := config.Validate(); err != nil { return nil, err } diff --git a/agent/connect/parsing.go b/agent/connect/parsing.go index f1e89fe0255b..24ea7afcfc48 100644 --- a/agent/connect/parsing.go +++ b/agent/connect/parsing.go @@ -198,7 +198,7 @@ func EncodeSigningKeyID(keyID []byte) string { return HexString(keyID) } // HexString returns a standard colon-separated hex value for the input // byte slice. This should be used with cert serial numbers and so on. func HexString(input []byte) string { - return strings.Replace(fmt.Sprintf("% x", input), " ", ":", -1) + return strings.ReplaceAll(fmt.Sprintf("% x", input), " ", ":") } // IsHexString returns true if the input is the output of HexString(). Meant diff --git a/agent/discovery_chain_endpoint.go b/agent/discovery_chain_endpoint.go index 69a5e668f46e..9704c3d8d19f 100644 --- a/agent/discovery_chain_endpoint.go +++ b/agent/discovery_chain_endpoint.go @@ -62,7 +62,7 @@ func (s *HTTPHandlers) DiscoveryChainRead(resp http.ResponseWriter, req *http.Re var out structs.DiscoveryChainResponse defer setMeta(resp, &out.QueryMeta) - if args.QueryOptions.UseCache { + if args.UseCache { raw, m, err := s.agent.cache.Get(req.Context(), cachetype.CompiledDiscoveryChainName, &args) if err != nil { return nil, err @@ -80,13 +80,13 @@ func (s *HTTPHandlers) DiscoveryChainRead(resp http.ResponseWriter, req *http.Re if err := s.agent.RPC(req.Context(), "DiscoveryChain.Get", &args, &out); err != nil { return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } } - out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + out.ConsistencyLevel = args.ConsistencyLevel() return discoveryChainReadResponse{Chain: out.Chain}, nil } diff --git a/agent/dns.go b/agent/dns.go index dd34b3b8bda7..b47483e8c268 100644 --- a/agent/dns.go +++ b/agent/dns.go @@ -261,8 +261,8 @@ func (d *DNSServer) Shutdown() { if d.Server != nil { d.logger.Info("Stopping server", "protocol", "DNS", - "address", d.Server.Addr, - "network", d.Server.Net, + "address", d.Addr, + "network", d.Net, ) err := d.Server.Shutdown() if err != nil { @@ -274,7 +274,7 @@ func (d *DNSServer) Shutdown() { // GetAddr is a function to return the server address if is not nil. 
func (d *DNSServer) GetAddr() string { if d.Server != nil { - return d.Server.Addr + return d.Addr } return "" } @@ -349,10 +349,11 @@ func recursorAddr(recursor string) (string, error) { START: _, _, err := net.SplitHostPort(recursor) if ae, ok := err.(*net.AddrError); ok { - if ae.Err == "missing port in address" { + switch ae.Err { + case "missing port in address": recursor = ipaddr.FormatAddressPort(recursor, 53) goto START - } else if ae.Err == "too many colons in address" { + case "too many colons in address": if ip := net.ParseIP(recursor); ip != nil && ip.To4() == nil { recursor = ipaddr.FormatAddressPort(recursor, 53) goto START @@ -1209,8 +1210,8 @@ RPC: // encodeKVasRFC1464 encodes a key-value pair according to RFC1464 func encodeKVasRFC1464(key, value string) (txt string) { // For details on these replacements c.f. https://www.ietf.org/rfc/rfc1464.txt - key = strings.Replace(key, "`", "``", -1) - key = strings.Replace(key, "=", "`=", -1) + key = strings.ReplaceAll(key, "`", "``") + key = strings.ReplaceAll(key, "=", "`=") // Backquote the leading spaces leadingSpacesRE := regexp.MustCompile("^ +") @@ -1222,7 +1223,7 @@ func encodeKVasRFC1464(key, value string) (txt string) { numTrailingSpaces := len(trailingSpacesRE.FindString(key)) key = trailingSpacesRE.ReplaceAllString(key, strings.Repeat("` ", numTrailingSpaces)) - value = strings.Replace(value, "`", "``", -1) + value = strings.ReplaceAll(value, "`", "``") return key + "=" + value } @@ -1816,11 +1817,12 @@ func makeARecord(qType uint16, ip net.IP, ttl time.Duration) dns.RR { func (d *DNSServer) makeRecordFromNode(node *structs.Node, qType uint16, qName string, cfg *dnsRequestConfig, maxRecursionLevel int) []dns.RR { ttl := cfg.NodeTTL addrTranslate := dnsutil.TranslateAddressAcceptDomain - if qType == dns.TypeA { + switch qType { + case dns.TypeA: addrTranslate |= dnsutil.TranslateAddressAcceptIPv4 - } else if qType == dns.TypeAAAA { + case dns.TypeAAAA: addrTranslate |= dnsutil.TranslateAddressAcceptIPv6 - } else { + default: addrTranslate |= dnsutil.TranslateAddressAcceptAny } @@ -1988,11 +1990,12 @@ MORE_REC: // Craft dns records from a CheckServiceNode struct func (d *DNSServer) makeNodeServiceRecords(lookup serviceLookup, node structs.CheckServiceNode, req *dns.Msg, ttl time.Duration, cfg *dnsRequestConfig, maxRecursionLevel int) ([]dns.RR, []dns.RR) { addrTranslate := dnsutil.TranslateAddressAcceptDomain - if req.Question[0].Qtype == dns.TypeA { + switch req.Question[0].Qtype { + case dns.TypeA: addrTranslate |= dnsutil.TranslateAddressAcceptIPv4 - } else if req.Question[0].Qtype == dns.TypeAAAA { + case dns.TypeAAAA: addrTranslate |= dnsutil.TranslateAddressAcceptIPv6 - } else { + default: addrTranslate |= dnsutil.TranslateAddressAcceptAny } diff --git a/agent/envoyextensions/builtin/property-override/structpatcher.go b/agent/envoyextensions/builtin/property-override/structpatcher.go index 76fe3be1a857..0071e5b447e4 100644 --- a/agent/envoyextensions/builtin/property-override/structpatcher.go +++ b/agent/envoyextensions/builtin/property-override/structpatcher.go @@ -97,7 +97,7 @@ func findTargetMessageAndField(m protoreflect.Message, parsedPath []string, patc // In the future, we could support Any by using the type field to initialize a struct for // the nested message value. 
return nil, nil, fmt.Errorf("variant-type message fields (google.protobuf.Any) are not supported") - case !(fieldDesc.Kind() == protoreflect.MessageKind): + case fieldDesc.Kind() != protoreflect.MessageKind: // Non-Any fields that could be used to serialize protos as bytes will get a clear error message // in this scenario. This also catches accidental use of non-complex fields as parent fields. return nil, nil, fmt.Errorf("path contains member of non-message field '%s' (type '%s'); this type does not support child fields", fieldName, fieldDesc.Kind()) diff --git a/agent/grpc-external/limiter/limiter.go b/agent/grpc-external/limiter/limiter.go index 44aaac616f99..f8ed49302c07 100644 --- a/agent/grpc-external/limiter/limiter.go +++ b/agent/grpc-external/limiter/limiter.go @@ -73,10 +73,7 @@ func (l *SessionLimiter) Run(ctx context.Context) { for { select { case <-l.wakeCh: - for { - if !l.overCapacity() { - break - } + for l.overCapacity() { if err := l.drainLimiter.Wait(ctx); err != nil { break diff --git a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go index bbc2390a776b..7f49faa5e148 100644 --- a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go +++ b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go @@ -102,8 +102,8 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G return &pbdataplane.GetEnvoyBootstrapParamsResponse{ Identity: serviceName, Service: serviceName, - Partition: svc.EnterpriseMeta.PartitionOrDefault(), - Namespace: svc.EnterpriseMeta.NamespaceOrDefault(), + Partition: svc.PartitionOrDefault(), + Namespace: svc.NamespaceOrDefault(), Config: bootstrapConfig, Datacenter: s.Datacenter, NodeName: svc.Node, diff --git a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go index bcff21cce50b..5178db55363f 100644 --- a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go +++ b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go @@ -167,8 +167,8 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) { } require.Equal(t, serverDC, resp.Datacenter) - require.Equal(t, tc.registerReq.EnterpriseMeta.PartitionOrDefault(), resp.Partition) - require.Equal(t, tc.registerReq.EnterpriseMeta.NamespaceOrDefault(), resp.Namespace) + require.Equal(t, tc.registerReq.PartitionOrDefault(), resp.Partition) + require.Equal(t, tc.registerReq.NamespaceOrDefault(), resp.Namespace) requireConfigField(t, resp, proxyConfigKey, structpb.NewStringValue(proxyConfigValue)) require.Equal(t, tc.registerReq.Node, resp.NodeName) diff --git a/agent/grpc-external/services/peerstream/subscription_view.go b/agent/grpc-external/services/peerstream/subscription_view.go index 575729bc71df..a6812394c41f 100644 --- a/agent/grpc-external/services/peerstream/subscription_view.go +++ b/agent/grpc-external/services/peerstream/subscription_view.go @@ -66,8 +66,8 @@ func (e *exportedServiceRequest) NewMaterializer() (submatview.Materializer, err Subject: &pbsubscribe.SubscribeRequest_NamedSubject{ NamedSubject: &pbsubscribe.NamedSubject{ Key: e.req.ServiceName, - Namespace: e.req.EnterpriseMeta.NamespaceOrEmpty(), - Partition: e.req.EnterpriseMeta.PartitionOrEmpty(), + Namespace: e.req.NamespaceOrEmpty(), + Partition: e.req.PartitionOrEmpty(), }, }, Token: e.req.Token, diff --git 
a/agent/grpc-external/services/peerstream/subscription_view_test.go b/agent/grpc-external/services/peerstream/subscription_view_test.go index cd2f61e60feb..5deb91dd725f 100644 --- a/agent/grpc-external/services/peerstream/subscription_view_test.go +++ b/agent/grpc-external/services/peerstream/subscription_view_test.go @@ -165,7 +165,7 @@ func (s *store) simulateUpdates(ctx context.Context, events []map[string]stream. event.Index = idx s.pub.Publish([]stream.Event{event}) - s.stateMap.mu.Lock() + s.mu.Lock() svcState, ok := s.states[svc] if !ok { svcState = &serviceState{ @@ -174,7 +174,7 @@ func (s *store) simulateUpdates(ctx context.Context, events []map[string]stream. } s.states[svc] = svcState } - s.stateMap.mu.Unlock() + s.mu.Unlock() svcState.mu.Lock() svcState.idx = idx @@ -227,7 +227,7 @@ type serviceState struct { // // Snapshot implements stream.SnapshotFunc. func (s *snapshotHandler) Snapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (index uint64, err error) { - s.stateMap.mu.Lock() + s.mu.Lock() svcState, ok := s.states[req.Subject.String()] if !ok { svcState = &serviceState{ @@ -236,7 +236,7 @@ func (s *snapshotHandler) Snapshot(req stream.SubscribeRequest, buf stream.Snaps } s.states[req.Subject.String()] = svcState } - s.stateMap.mu.Unlock() + s.mu.Unlock() svcState.mu.Lock() defer svcState.mu.Unlock() diff --git a/agent/grpc-external/services/resource/delete_test.go b/agent/grpc-external/services/resource/delete_test.go index 25a801205188..c984b7a154ee 100644 --- a/agent/grpc-external/services/resource/delete_test.go +++ b/agent/grpc-external/services/resource/delete_test.go @@ -196,7 +196,7 @@ func TestDelete_ACLs(t *testing.T) { mockACLResolver := &svc.MockACLResolver{} mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). Return(tc.authz, nil) - builder.ServiceImpl().Config.ACLResolver = mockACLResolver + builder.ServiceImpl().ACLResolver = mockACLResolver // Exercise ACL. _, err = client.Delete(testContext(t), &pbresource.DeleteRequest{Id: rsp.Resource.Id}) diff --git a/agent/grpc-external/services/resource/list_test.go b/agent/grpc-external/services/resource/list_test.go index 43d5def0c37b..3d3f4b862863 100644 --- a/agent/grpc-external/services/resource/list_test.go +++ b/agent/grpc-external/services/resource/list_test.go @@ -347,7 +347,7 @@ func roundTripList(t *testing.T, authz acl.Authorizer) (*pbresource.Resource, *p mockACLResolver := &svc.MockACLResolver{} mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). Return(authz, nil) - builder.ServiceImpl().Config.ACLResolver = mockACLResolver + builder.ServiceImpl().ACLResolver = mockACLResolver rsp2, err := client.List( ctx, diff --git a/agent/grpc-external/services/resource/testing/builder.go b/agent/grpc-external/services/resource/testing/builder.go index 8c4209674683..9dcec1a6f6c6 100644 --- a/agent/grpc-external/services/resource/testing/builder.go +++ b/agent/grpc-external/services/resource/testing/builder.go @@ -47,9 +47,7 @@ func (b *Builder) ServiceImpl() *svc.Server { } func (b *Builder) WithRegisterFns(registerFns ...func(resource.Registry)) *Builder { - for _, registerFn := range registerFns { - b.registerFns = append(b.registerFns, registerFn) - } + b.registerFns = append(b.registerFns, registerFns...) return b } @@ -61,9 +59,7 @@ func (b *Builder) WithACLResolver(aclResolver svc.ACLResolver) *Builder { // WithTenancies adds additional partitions and namespaces if default/default // is not sufficient. 
func (b *Builder) WithTenancies(tenancies ...*pbresource.Tenancy) *Builder { - for _, tenancy := range tenancies { - b.tenancies = append(b.tenancies, tenancy) - } + b.tenancies = append(b.tenancies, tenancies...) return b } diff --git a/agent/grpc-external/services/resource/write_status_test.go b/agent/grpc-external/services/resource/write_status_test.go index 4c524430251b..fce5cb758e20 100644 --- a/agent/grpc-external/services/resource/write_status_test.go +++ b/agent/grpc-external/services/resource/write_status_test.go @@ -60,7 +60,7 @@ func TestWriteStatus_ACL(t *testing.T) { mockACLResolver := &svc.MockACLResolver{} mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). Return(tc.authz, nil) - builder.ServiceImpl().Config.ACLResolver = mockACLResolver + builder.ServiceImpl().ACLResolver = mockACLResolver // exercise ACL _, err = client.WriteStatus(testContext(t), validWriteStatusRequest(t, artist)) diff --git a/agent/grpc-internal/client_test.go b/agent/grpc-internal/client_test.go index 134a62aa4aae..5b94711c59e4 100644 --- a/agent/grpc-internal/client_test.go +++ b/agent/grpc-internal/client_test.go @@ -375,8 +375,8 @@ func TestClientConnPool_ForwardToLeader_Failover(t *testing.T) { func newConfig(t *testing.T, dc, agentType string) resolver.Config { n := t.Name() - s := strings.Replace(n, "/", "", -1) - s = strings.Replace(s, "_", "", -1) + s := strings.ReplaceAll(n, "/", "") + s = strings.ReplaceAll(s, "_", "") return resolver.Config{ Datacenter: dc, AgentType: agentType, diff --git a/agent/grpc-internal/resolver/resolver_test.go b/agent/grpc-internal/resolver/resolver_test.go index 53e7f6c743ae..8783af77aa82 100644 --- a/agent/grpc-internal/resolver/resolver_test.go +++ b/agent/grpc-internal/resolver/resolver_test.go @@ -168,8 +168,8 @@ func newServerMeta(name, dc, ip string, wan bool) *metadata.Server { func newConfig(t *testing.T, dc, agentType string) Config { n := t.Name() - s := strings.Replace(n, "/", "", -1) - s = strings.Replace(s, "_", "", -1) + s := strings.ReplaceAll(n, "/", "") + s = strings.ReplaceAll(s, "_", "") return Config{ Datacenter: dc, AgentType: agentType, diff --git a/agent/hcp/telemetry/otel_exporter_test.go b/agent/hcp/telemetry/otel_exporter_test.go index 6f23872259e1..1d480da8ff93 100644 --- a/agent/hcp/telemetry/otel_exporter_test.go +++ b/agent/hcp/telemetry/otel_exporter_test.go @@ -217,7 +217,7 @@ func TestExport_CustomMetrics(t *testing.T) { // Verify count for transform failure metric. 
require.NotNil(t, sv) require.NotNil(t, sv.AggregateSample) - require.Equal(t, 1, sv.AggregateSample.Count) + require.Equal(t, 1, sv.Count) }) } } diff --git a/agent/hcp/telemetry/otel_sink_test.go b/agent/hcp/telemetry/otel_sink_test.go index 683f33a3a40d..f6ae2ab4a545 100644 --- a/agent/hcp/telemetry/otel_sink_test.go +++ b/agent/hcp/telemetry/otel_sink_test.go @@ -484,21 +484,21 @@ func generateSamples(n int, labels map[string]string) map[string]metricdata.Metr func performSinkOperation(sink *OTELSink, k string, v metricdata.Metrics, errCh chan error) { key := strings.Split(k, ".") data := v.Data - switch data.(type) { + switch data := data.(type) { case metricdata.Gauge[float64]: - gauge, ok := data.(metricdata.Gauge[float64]) + gauge, ok := data if !ok { errCh <- fmt.Errorf("unexpected type assertion error for key: %s", key) } sink.SetGauge(key, float32(gauge.DataPoints[0].Value)) case metricdata.Sum[float64]: - sum, ok := data.(metricdata.Sum[float64]) + sum, ok := data if !ok { errCh <- fmt.Errorf("unexpected type assertion error for key: %s", key) } sink.IncrCounter(key, float32(sum.DataPoints[0].Value)) case metricdata.Histogram[float64]: - hist, ok := data.(metricdata.Histogram[float64]) + hist, ok := data if !ok { errCh <- fmt.Errorf("unexpected type assertion error for key: %s", key) } diff --git a/agent/hcp/telemetry/otlp_transform_test.go b/agent/hcp/telemetry/otlp_transform_test.go index d67df73d8343..ec7ac16e131f 100644 --- a/agent/hcp/telemetry/otlp_transform_test.go +++ b/agent/hcp/telemetry/otlp_transform_test.go @@ -339,7 +339,7 @@ func TestTransformOTLP_CustomMetrics(t *testing.T) { // Verify count for transform failure metric. require.NotNil(t, sv) require.NotNil(t, sv.AggregateSample) - require.Equal(t, 3, sv.AggregateSample.Count) + require.Equal(t, 3, sv.Count) }) } } diff --git a/agent/hcp/telemetry_provider_test.go b/agent/hcp/telemetry_provider_test.go index 6801b9271ebc..8a2c8acd4c65 100644 --- a/agent/hcp/telemetry_provider_test.go +++ b/agent/hcp/telemetry_provider_test.go @@ -281,7 +281,7 @@ func TestTelemetryConfigProvider_UpdateConfig(t *testing.T) { require.NotNil(t, interval, 1) sv := interval.Counters[tc.metricKey] assert.NotNil(t, sv.AggregateSample) - require.Equal(t, sv.AggregateSample.Count, 1) + require.Equal(t, sv.Count, 1) }) } } diff --git a/agent/hcp/testing.go b/agent/hcp/testing.go index 8ebaff3f9dcd..11f3ac0d7299 100644 --- a/agent/hcp/testing.go +++ b/agent/hcp/testing.go @@ -178,5 +178,5 @@ func (s *MockHCPServer) handleDiscover(r *http.Request, cluster resource.Resourc func errResponse(w http.ResponseWriter, err error) { log.Printf("ERROR 500: %s\n", err) w.WriteHeader(500) - w.Write([]byte(fmt.Sprintf(`{"error": %q}`, err.Error()))) + fmt.Fprintf(w, `{"error": %q}`, err.Error()) } diff --git a/agent/health_endpoint.go b/agent/health_endpoint.go index 0001c35a1121..95eb329ed05b 100644 --- a/agent/health_endpoint.go +++ b/agent/health_endpoint.go @@ -45,12 +45,12 @@ RETRY_ONCE: if err := s.agent.RPC(req.Context(), "Health.ChecksInState", &args, &out); err != nil { return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } - out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + out.ConsistencyLevel = args.ConsistencyLevel() // Use empty list instead of nil if out.HealthChecks == nil { @@ -89,12 +89,12 @@ 
RETRY_ONCE: if err := s.agent.RPC(req.Context(), "Health.NodeChecks", &args, &out); err != nil { return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } - out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + out.ConsistencyLevel = args.ConsistencyLevel() // Use empty list instead of nil if out.HealthChecks == nil { @@ -135,12 +135,12 @@ RETRY_ONCE: if err := s.agent.RPC(req.Context(), "Health.ServiceChecks", &args, &out); err != nil { return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } - out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + out.ConsistencyLevel = args.ConsistencyLevel() // Use empty list instead of nil if out.HealthChecks == nil { @@ -241,10 +241,10 @@ func (s *HTTPHandlers) healthServiceNodes(resp http.ResponseWriter, req *http.Re return nil, err } - if args.QueryOptions.UseCache { + if args.UseCache { setCacheMeta(resp, &md) } - out.QueryMeta.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + out.ConsistencyLevel = args.ConsistencyLevel() _ = setMeta(resp, &out.QueryMeta) // Translate addresses after filtering so we don't waste effort. diff --git a/agent/http.go b/agent/http.go index eb6e186cd87b..077059991bc8 100644 --- a/agent/http.go +++ b/agent/http.go @@ -191,7 +191,7 @@ func (s *HTTPHandlers) handler() http.Handler { // Omit the leading slash. // Distinguish thing like /v1/query from /v1/query/ by having // an extra underscore. - path_label := strings.Replace(pattern[1:], "/", "_", -1) + path_label := strings.ReplaceAll(pattern[1:], "/", "_") // Register the wrapper. wrapper := func(resp http.ResponseWriter, req *http.Request) { @@ -426,7 +426,7 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc logURL += "" continue } - logURL = strings.Replace(logURL, token, "", -1) + logURL = strings.ReplaceAll(logURL, token, "") } httpLogger.Warn("This request used the token query parameter "+ "which is deprecated and will be removed in a future Consul version", @@ -886,7 +886,7 @@ func setResultsFilteredByACLs(resp http.ResponseWriter, filtered bool) { // setHeaders is used to set canonical response header fields func setHeaders(resp http.ResponseWriter, headers map[string]string) { for field, value := range headers { - resp.Header().Set(http.CanonicalHeaderKey(field), value) + resp.Header().Set(field, value) } } diff --git a/agent/http_test.go b/agent/http_test.go index 724639f30d8a..30f9a8b475ff 100644 --- a/agent/http_test.go +++ b/agent/http_test.go @@ -1793,7 +1793,7 @@ func TestHTTPServer_HandshakeTimeout(t *testing.T) { }) defer a.Shutdown() - addr, err := firstAddr(a.Agent.apiServers, "https") + addr, err := firstAddr(a.apiServers, "https") require.NoError(t, err) // Connect to it with a plain TCP client that doesn't attempt to send HTTP or // complete a TLS handshake. 
@@ -1861,7 +1861,7 @@ func TestRPC_HTTPSMaxConnsPerClient(t *testing.T) { }) defer a.Shutdown() - addr, err := firstAddr(a.Agent.apiServers, strings.ToLower(tc.name)) + addr, err := firstAddr(a.apiServers, strings.ToLower(tc.name)) require.NoError(t, err) assertConn := func(conn net.Conn, wantOpen bool) { diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index 4f0b188a0cc4..aad1d23b33ab 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -164,7 +164,7 @@ func (s *HTTPHandlers) IntentionMatch(resp http.ResponseWriter, req *http.Reques var out structs.IndexedIntentionMatches defer setMeta(resp, &out.QueryMeta) - if s.agent.config.HTTPUseCache && args.QueryOptions.UseCache { + if s.agent.config.HTTPUseCache && args.UseCache { raw, m, err := s.agent.cache.Get(req.Context(), cachetype.IntentionMatchName, args) if err != nil { return nil, err @@ -182,13 +182,13 @@ func (s *HTTPHandlers) IntentionMatch(resp http.ResponseWriter, req *http.Reques if err := s.agent.RPC(req.Context(), "Intention.Match", args, &out); err != nil { return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } } - out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + out.ConsistencyLevel = args.ConsistencyLevel() // We must have an identical count of matches if len(out.Matches) != len(names) { diff --git a/agent/leafcert/leafcert_test_helpers.go b/agent/leafcert/leafcert_test_helpers.go index 5b0b3226cb38..8c46c10bcbcf 100644 --- a/agent/leafcert/leafcert_test_helpers.go +++ b/agent/leafcert/leafcert_test_helpers.go @@ -181,9 +181,9 @@ func (s *TestSigner) SignCert(ctx context.Context, req *structs.CASignRequest) ( } var serviceID *connect.SpiffeIDService - switch spiffeID.(type) { + switch spiffeID := spiffeID.(type) { case *connect.SpiffeIDService: - serviceID = spiffeID.(*connect.SpiffeIDService) + serviceID = spiffeID default: return nil, fmt.Errorf("unexpected spiffeID type %T", spiffeID) } diff --git a/agent/leafcert/structs.go b/agent/leafcert/structs.go index 8cd7375731fc..3d92bb786b37 100644 --- a/agent/leafcert/structs.go +++ b/agent/leafcert/structs.go @@ -38,7 +38,7 @@ type ConnectCALeafRequest struct { } func (r *ConnectCALeafRequest) Key() string { - r.EnterpriseMeta.Normalize() + r.Normalize() switch { case r.Agent != "": diff --git a/agent/local/state.go b/agent/local/state.go index 67e72aece0b4..ad420e73da0c 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -617,7 +617,7 @@ func (l *State) AddAliasCheck(checkID structs.CheckID, srcServiceID structs.Serv // ServiceExists return true if the given service does exists func (l *State) ServiceExists(serviceID structs.ServiceID) bool { - serviceID.EnterpriseMeta.Normalize() + serviceID.Normalize() l.Lock() defer l.Unlock() diff --git a/agent/operator_endpoint.go b/agent/operator_endpoint.go index f669c13bd5ca..de2f45ab79e1 100644 --- a/agent/operator_endpoint.go +++ b/agent/operator_endpoint.go @@ -67,7 +67,7 @@ func (s *HTTPHandlers) OperatorRaftTransferLeader(resp http.ResponseWriter, req if err != nil { return nil, err } - if result.Success != true { + if !result.Success { return nil, HTTPError{StatusCode: http.StatusNotFound, Reason: fmt.Sprintf("Failed to transfer Leader: %s", err.Error())} } reply := new(api.TransferLeaderResponse) @@ -396,12 +396,12 @@ 
RETRY_ONCE: s.nodeMetricsLabels()) return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < out.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } - out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + out.ConsistencyLevel = args.ConsistencyLevel() metrics.IncrCounterWithLabels([]string{"client", "api", "success", "operator_usage"}, 1, s.nodeMetricsLabels()) return out, nil diff --git a/agent/pool/pool.go b/agent/pool/pool.go index d793dcd4a1de..6eb8e8c9368f 100644 --- a/agent/pool/pool.go +++ b/agent/pool/pool.go @@ -456,7 +456,7 @@ func DialRPCViaMeshGateway( return nil, nil, err } - var conn net.Conn = tlsConn + var conn = tlsConn var hc HalfCloser if tlsConn, ok := conn.(*tls.Conn); ok { diff --git a/agent/prepared_query_endpoint.go b/agent/prepared_query_endpoint.go index 8a3f1f038eb0..08f7dca06cb1 100644 --- a/agent/prepared_query_endpoint.go +++ b/agent/prepared_query_endpoint.go @@ -50,12 +50,12 @@ RETRY_ONCE: if err := s.agent.RPC(req.Context(), "PreparedQuery.List", &args, &reply); err != nil { return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < reply.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < reply.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } - reply.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + reply.ConsistencyLevel = args.ConsistencyLevel() // Use empty list instead of nil. if reply.Queries == nil { @@ -123,12 +123,12 @@ func (s *HTTPHandlers) preparedQueryExecute(id string, resp http.ResponseWriter, var reply structs.PreparedQueryExecuteResponse defer setMeta(resp, &reply.QueryMeta) - if args.QueryOptions.UseCache { + if args.UseCache { raw, m, err := s.agent.cache.Get(req.Context(), cachetype.PreparedQueryName, &args) if err != nil { // Don't return error if StaleIfError is set and we are within it and had // a cached value. 
- if raw != nil && m.Hit && args.QueryOptions.StaleIfError > m.Age { + if raw != nil && m.Hit && args.StaleIfError > m.Age { // Fall through to the happy path below } else { return nil, err @@ -151,13 +151,13 @@ func (s *HTTPHandlers) preparedQueryExecute(id string, resp http.ResponseWriter, } return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < reply.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < reply.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } } - reply.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + reply.ConsistencyLevel = args.ConsistencyLevel() // Note that we translate using the DC that the results came from, since // a query can fail over to a different DC than where the execute request @@ -204,12 +204,12 @@ RETRY_ONCE: } return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < reply.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < reply.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } - reply.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + reply.ConsistencyLevel = args.ConsistencyLevel() return reply, nil } @@ -233,12 +233,12 @@ RETRY_ONCE: } return nil, err } - if args.QueryOptions.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < reply.LastContact { + if args.AllowStale && args.MaxStaleDuration > 0 && args.MaxStaleDuration < reply.LastContact { args.AllowStale = false args.MaxStaleDuration = 0 goto RETRY_ONCE } - reply.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() + reply.ConsistencyLevel = args.ConsistencyLevel() return reply.Queries, nil } diff --git a/agent/proxycfg-glue/config_entry.go b/agent/proxycfg-glue/config_entry.go index 4983edbad14e..e1932aec1e02 100644 --- a/agent/proxycfg-glue/config_entry.go +++ b/agent/proxycfg-glue/config_entry.go @@ -123,7 +123,7 @@ func (r *configEntryRequest) Request(index uint64) *pbsubscribe.SubscribeRequest Topic: r.topic, Index: index, Datacenter: r.req.Datacenter, - Token: r.req.QueryOptions.Token, + Token: r.req.Token, } if name := r.req.Name; name == "" { diff --git a/agent/proxycfg-glue/discovery_chain_test.go b/agent/proxycfg-glue/discovery_chain_test.go index e4156667d36b..0331c84874b0 100644 --- a/agent/proxycfg-glue/discovery_chain_test.go +++ b/agent/proxycfg-glue/discovery_chain_test.go @@ -101,7 +101,7 @@ func TestServerCompiledDiscoveryChain(t *testing.T) { func newMockCompiledDiscoveryChain(t *testing.T) *mockCompiledDiscoveryChain { mock := &mockCompiledDiscoveryChain{} - mock.Mock.Test(t) + mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/agent/proxycfg-glue/health_blocking.go b/agent/proxycfg-glue/health_blocking.go index 0b2bcba8b972..b6d6e296dad9 100644 --- a/agent/proxycfg-glue/health_blocking.go +++ b/agent/proxycfg-glue/health_blocking.go @@ -59,7 +59,7 @@ func (h *serverHealthBlocking) Notify(ctx context.Context, args *structs.Service if args.ServiceName == "" { return fmt.Errorf("Must provide service name") } - if args.EnterpriseMeta.PartitionOrDefault() == acl.WildcardName { + if args.PartitionOrDefault() == acl.WildcardName { return fmt.Errorf("Wildcards are not allowed in the partition field") } @@ -79,7 +79,7 @@ func (h *serverHealthBlocking) Notify(ctx context.Context, args *structs.Service return err } - var hadResults bool = false + var hadResults = false return 
watch.ServerLocalNotify(ctx, correlationID, h.deps.GetStore, func(ws memdb.WatchSet, store Store) (uint64, *structs.IndexedCheckServiceNodes, error) { // This is necessary so that service export changes are eventually picked up, since diff --git a/agent/proxycfg-glue/health_test.go b/agent/proxycfg-glue/health_test.go index 821e22a78908..8ff8376ea4df 100644 --- a/agent/proxycfg-glue/health_test.go +++ b/agent/proxycfg-glue/health_test.go @@ -136,7 +136,7 @@ func TestServerHealth(t *testing.T) { func newMockHealth(t *testing.T) *mockHealth { mock := &mockHealth{} - mock.Mock.Test(t) + mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/agent/proxycfg-glue/intentions.go b/agent/proxycfg-glue/intentions.go index f3186c6689ab..28369fd196e1 100644 --- a/agent/proxycfg-glue/intentions.go +++ b/agent/proxycfg-glue/intentions.go @@ -41,7 +41,7 @@ func (c cacheIntentions) Notify(ctx context.Context, req *structs.ServiceSpecifi Type: structs.IntentionMatchDestination, Entries: []structs.IntentionMatchEntry{toIntentionMatchEntry(req)}, }, - QueryOptions: structs.QueryOptions{Token: req.QueryOptions.Token}, + QueryOptions: structs.QueryOptions{Token: req.Token}, } return c.c.NotifyCallback(ctx, cachetype.IntentionMatchName, query, correlationID, func(ctx context.Context, event cache.UpdateEvent) { var result any diff --git a/agent/proxycfg-glue/internal_service_dump_test.go b/agent/proxycfg-glue/internal_service_dump_test.go index e3d65ac6ae2c..e3d1675958b4 100644 --- a/agent/proxycfg-glue/internal_service_dump_test.go +++ b/agent/proxycfg-glue/internal_service_dump_test.go @@ -166,7 +166,7 @@ func TestServerInternalServiceDump(t *testing.T) { func newMockInternalServiceDump(t *testing.T) *mockInternalServiceDump { mock := &mockInternalServiceDump{} - mock.Mock.Test(t) + mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/agent/proxycfg-glue/resolved_service_config_test.go b/agent/proxycfg-glue/resolved_service_config_test.go index 248ab4eab363..f339eabb259e 100644 --- a/agent/proxycfg-glue/resolved_service_config_test.go +++ b/agent/proxycfg-glue/resolved_service_config_test.go @@ -103,7 +103,7 @@ func TestServerResolvedServiceConfig(t *testing.T) { func newMockResolvedServiceConfig(t *testing.T) *mockResolvedServiceConfig { mock := &mockResolvedServiceConfig{} - mock.Mock.Test(t) + mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/agent/proxycfg-glue/service_http_checks_test.go b/agent/proxycfg-glue/service_http_checks_test.go index 87bdfc7abe60..7110129881a7 100644 --- a/agent/proxycfg-glue/service_http_checks_test.go +++ b/agent/proxycfg-glue/service_http_checks_test.go @@ -85,7 +85,7 @@ func TestServerHTTPChecks(t *testing.T) { func newMockServiceHTTPChecks(t *testing.T) *mockServiceHTTPChecks { mock := &mockServiceHTTPChecks{} - mock.Mock.Test(t) + mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/agent/proxycfg-glue/service_list.go b/agent/proxycfg-glue/service_list.go index f4a9380df715..6ec8b9946dab 100644 --- a/agent/proxycfg-glue/service_list.go +++ b/agent/proxycfg-glue/service_list.go @@ -55,7 +55,7 @@ func (r *serviceListRequest) Request(index uint64) *pbsubscribe.SubscribeRequest Subject: &pbsubscribe.SubscribeRequest_WildcardSubject{WildcardSubject: true}, Index: index, Datacenter: r.req.Datacenter, - Token: r.req.QueryOptions.Token, + Token: r.req.Token, } } diff --git a/agent/proxycfg-glue/service_list_test.go b/agent/proxycfg-glue/service_list_test.go index c6372aaf4ea1..056e27a6fd9d 
100644 --- a/agent/proxycfg-glue/service_list_test.go +++ b/agent/proxycfg-glue/service_list_test.go @@ -127,7 +127,7 @@ func TestServerServiceList(t *testing.T) { func newMockServiceList(t *testing.T) *mockServiceList { mock := &mockServiceList{} - mock.Mock.Test(t) + mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/agent/proxycfg-sources/catalog/config_source.go b/agent/proxycfg-sources/catalog/config_source.go index ec4aabeeb143..bf9347fc47de 100644 --- a/agent/proxycfg-sources/catalog/config_source.go +++ b/agent/proxycfg-sources/catalog/config_source.go @@ -130,7 +130,7 @@ func (m *ConfigSource) startSync( proxyID proxycfg.ProxyID, ) error { logger := m.Logger.With( - "proxy_service_id", proxyID.ServiceID.String(), + "proxy_service_id", proxyID.String(), "node", proxyID.NodeName, ) diff --git a/agent/proxycfg-sources/catalog/config_source_test.go b/agent/proxycfg-sources/catalog/config_source_test.go index 79a7a8578902..4f2bfeebf4b4 100644 --- a/agent/proxycfg-sources/catalog/config_source_test.go +++ b/agent/proxycfg-sources/catalog/config_source_test.go @@ -533,7 +533,7 @@ type mockSession struct { func newMockSession(t *testing.T) *mockSession { m := &mockSession{} - m.Mock.Test(t) + m.Test(t) t.Cleanup(func() { m.AssertExpectations(t) }) diff --git a/agent/proxycfg/api_gateway.go b/agent/proxycfg/api_gateway.go index 0c44e5bfd497..fbef84f61603 100644 --- a/agent/proxycfg/api_gateway.go +++ b/agent/proxycfg/api_gateway.go @@ -398,7 +398,7 @@ func (h *handlerAPIGateway) handleRouteConfigUpdate(ctx context.Context, u Updat name: service.Name, namespace: service.NamespaceOrDefault(), partition: service.PartitionOrDefault(), - datacenter: h.stateConfig.source.Datacenter, + datacenter: h.source.Datacenter, } handler := &handlerUpstreams{handlerState: h.handlerState} @@ -449,7 +449,7 @@ func (h *handlerAPIGateway) handleRouteConfigUpdate(ctx context.Context, u Updat name: service.Name, namespace: service.NamespaceOrDefault(), partition: service.PartitionOrDefault(), - datacenter: h.stateConfig.source.Datacenter, + datacenter: h.source.Datacenter, } handler := &handlerUpstreams{handlerState: h.handlerState} @@ -491,7 +491,7 @@ func (h *handlerAPIGateway) recompileDiscoveryChains(snap *ConfigSnapshot) error for name, listener := range snap.APIGateway.Listeners { boundListener, ok := snap.APIGateway.BoundListeners[name] - if !(ok && snap.APIGateway.GatewayConfig.ListenerIsReady(name)) { + if !ok || !snap.APIGateway.GatewayConfig.ListenerIsReady(name) { // Skip any listeners that don't have a bound listener. Once the bound listener is created, this will be run again. 
// skip any listeners that might be in an invalid state continue diff --git a/agent/proxycfg/config_snapshot_glue.go b/agent/proxycfg/config_snapshot_glue.go index 6355e0595ec6..74b860a82dfd 100644 --- a/agent/proxycfg/config_snapshot_glue.go +++ b/agent/proxycfg/config_snapshot_glue.go @@ -37,12 +37,12 @@ func (s *ConfigSnapshot) Authorize(authz acl.Authorizer) error { var authzContext acl.AuthorizerContext switch s.Kind { case structs.ServiceKindConnectProxy: - s.ProxyID.EnterpriseMeta.FillAuthzContext(&authzContext) + s.ProxyID.FillAuthzContext(&authzContext) if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(s.Proxy.DestinationServiceName, &authzContext); err != nil { return status.Errorf(codes.PermissionDenied, err.Error()) } case structs.ServiceKindMeshGateway, structs.ServiceKindTerminatingGateway, structs.ServiceKindIngressGateway, structs.ServiceKindAPIGateway: - s.ProxyID.EnterpriseMeta.FillAuthzContext(&authzContext) + s.ProxyID.FillAuthzContext(&authzContext) if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(s.Service, &authzContext); err != nil { return status.Errorf(codes.PermissionDenied, err.Error()) } diff --git a/agent/proxycfg/ingress_gateway.go b/agent/proxycfg/ingress_gateway.go index 0262ffcb3779..3dabc8a6d0e7 100644 --- a/agent/proxycfg/ingress_gateway.go +++ b/agent/proxycfg/ingress_gateway.go @@ -83,14 +83,14 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent, return fmt.Errorf("error filling agent cache: %v", u.Err) } - switch { - case u.CorrelationID == rootsWatchID: + switch u.CorrelationID { + case rootsWatchID: roots, ok := u.Result.(*structs.IndexedCARoots) if !ok { return fmt.Errorf("invalid type for response: %T", u.Result) } snap.Roots = roots - case u.CorrelationID == gatewayConfigWatchID: + case gatewayConfigWatchID: resp, ok := u.Result.(*structs.ConfigEntryResponse) if !ok { return fmt.Errorf("invalid type for response: %T", u.Result) @@ -124,7 +124,7 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent, return err } - case u.CorrelationID == gatewayServicesWatchID: + case gatewayServicesWatchID: services, ok := u.Result.(*structs.IndexedGatewayServices) if !ok { return fmt.Errorf("invalid type for response: %T", u.Result) diff --git a/agent/proxycfg/mesh_gateway.go b/agent/proxycfg/mesh_gateway.go index 3d8bcd43a9aa..5c6a2f9174b1 100644 --- a/agent/proxycfg/mesh_gateway.go +++ b/agent/proxycfg/mesh_gateway.go @@ -742,9 +742,9 @@ func (s *handlerMeshGateway) handleUpdate(ctx context.Context, u UpdateEvent, sn if len(resp.Nodes) > 0 { snap.MeshGateway.ServiceGroups[sn] = resp.Nodes - } else if _, ok := snap.MeshGateway.ServiceGroups[sn]; ok { - delete(snap.MeshGateway.ServiceGroups, sn) - } + } else { + delete(snap.MeshGateway.ServiceGroups, sn) + } case strings.HasPrefix(u.CorrelationID, "peering-connect-service:"): resp, ok := u.Result.(*structs.IndexedCheckServiceNodes) diff --git a/agent/proxycfg/naming.go b/agent/proxycfg/naming.go index a9bd5fd8c0ca..95b27e0a7e9e 100644 --- a/agent/proxycfg/naming.go +++ b/agent/proxycfg/naming.go @@ -98,7 +98,7 @@ func (u *UpstreamID) normalize() { u.Type = "" } - u.EnterpriseMeta.Normalize() + u.Normalize() } // String encodes the UpstreamID into a string for use in agent cache keys.
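Most of the hunks in this patch replace a selector that goes through an embedded field, e.g. req.QueryOptions.Token, with the promoted form req.Token: Go promotes the fields and methods of an embedded struct onto the outer type, so both spellings refer to the same field and staticcheck flags the longer one as redundant. A minimal, self-contained sketch of the rule follows; the types and field sets here are illustrative stand-ins, not the real Consul structs.

package main

import "fmt"

// QueryOptions stands in for an embedded options struct.
type QueryOptions struct {
	Token      string
	AllowStale bool
}

// ServiceSpecificRequest embeds QueryOptions, so its fields are promoted.
type ServiceSpecificRequest struct {
	QueryOptions // embedded: no field name
	ServiceName  string
}

func main() {
	req := ServiceSpecificRequest{ServiceName: "web"}
	req.QueryOptions.Token = "secret" // explicit path through the embedded type
	fmt.Println(req.Token)            // same field via promotion; the form this patch standardizes on
}

The same promotion applies to methods, which is why calls such as args.ConsistencyLevel() or mock.Test(t) in the surrounding hunks work without spelling out the embedded QueryOptions or mock.Mock.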
diff --git a/agent/proxycfg/naming_test.go b/agent/proxycfg/naming_test.go index 0615a8128182..5d22acf3c462 100644 --- a/agent/proxycfg/naming_test.go +++ b/agent/proxycfg/naming_test.go @@ -17,7 +17,7 @@ func TestUpstreamIDFromTargetID(t *testing.T) { expect UpstreamID } run := func(t *testing.T, tc testcase) { - tc.expect.EnterpriseMeta.Normalize() + tc.expect.Normalize() got := NewUpstreamIDFromTargetID(tc.tid) require.Equal(t, tc.expect, got) @@ -60,7 +60,7 @@ func TestUpstreamIDFromString(t *testing.T) { expect UpstreamID } run := func(t *testing.T, tc testcase) { - tc.expect.EnterpriseMeta.Normalize() + tc.expect.Normalize() got := UpstreamIDFromString(tc.id) require.Equal(t, tc.expect, got) diff --git a/agent/proxycfg/snapshot.go b/agent/proxycfg/snapshot.go index bac6e8b6dad4..115e80306b7f 100644 --- a/agent/proxycfg/snapshot.go +++ b/agent/proxycfg/snapshot.go @@ -569,7 +569,7 @@ func (c *ConfigSnapshot) GetMeshGatewayEndpoints(key GatewayKey) structs.CheckSe maxModifyIndex := func(vals structs.CheckServiceNodes) uint64 { var max uint64 for _, v := range vals { - if i := v.Service.RaftIndex.ModifyIndex; i > max { + if i := v.Service.ModifyIndex; i > max { max = i } } diff --git a/agent/proxycfg/state.go b/agent/proxycfg/state.go index d0ae44fbab0c..829db71b7348 100644 --- a/agent/proxycfg/state.go +++ b/agent/proxycfg/state.go @@ -150,10 +150,10 @@ func copyProxyConfig(ns *structs.NodeService) (structs.ConnectProxyConfig, error // places and makes tracking these upstreams simpler as we can dedup them // with the maps tracking upstream ids being watched. if us.DestinationPartition == "" { - proxyCfg.Upstreams[idx].DestinationPartition = ns.EnterpriseMeta.PartitionOrDefault() + proxyCfg.Upstreams[idx].DestinationPartition = ns.PartitionOrDefault() } if us.DestinationNamespace == "" { - proxyCfg.Upstreams[idx].DestinationNamespace = ns.EnterpriseMeta.NamespaceOrDefault() + proxyCfg.Upstreams[idx].DestinationNamespace = ns.NamespaceOrDefault() } // If PeerName is not empty, the DestinationPartition refers @@ -217,10 +217,10 @@ func newKindHandler(config stateConfig, s serviceInstance, ch chan UpdateEvent) case structs.ServiceKindConnectProxy: handler = &handlerConnectProxy{handlerState: h} case structs.ServiceKindTerminatingGateway: - h.stateConfig.logger = config.logger.Named(logging.TerminatingGateway) + h.logger = config.logger.Named(logging.TerminatingGateway) handler = &handlerTerminatingGateway{handlerState: h} case structs.ServiceKindMeshGateway: - h.stateConfig.logger = config.logger.Named(logging.MeshGateway) + h.logger = config.logger.Named(logging.MeshGateway) handler = &handlerMeshGateway{handlerState: h} case structs.ServiceKindIngressGateway: handler = &handlerIngressGateway{handlerState: h} diff --git a/agent/proxycfg/state_test.go b/agent/proxycfg/state_test.go index 307b81c65c25..43349654f7aa 100644 --- a/agent/proxycfg/state_test.go +++ b/agent/proxycfg/state_test.go @@ -371,7 +371,7 @@ func genVerifyServiceSpecificPeeredRequest(expectedService, expectedFilter, expe require.Equal(t, expectedDatacenter, reqReal.Datacenter) require.Equal(t, expectedPeer, reqReal.PeerName) require.Equal(t, expectedService, reqReal.ServiceName) - require.Equal(t, expectedFilter, reqReal.QueryOptions.Filter) + require.Equal(t, expectedFilter, reqReal.Filter) require.Equal(t, connect, reqReal.Connect) } } @@ -774,8 +774,8 @@ func TestState_WatchesAndUpdates(t *testing.T) { require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks) 
require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints) - require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len()) - require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len()) + require.Equal(t, 1, snap.ConnectProxy.PeerUpstreamEndpoints.Len()) + require.Equal(t, 1, snap.ConnectProxy.UpstreamPeerTrustBundles.Len()) require.True(t, snap.ConnectProxy.IntentionsSet) require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions) @@ -814,8 +814,8 @@ func TestState_WatchesAndUpdates(t *testing.T) { require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks) require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints) - require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len()) - require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len()) + require.Equal(t, 1, snap.ConnectProxy.PeerUpstreamEndpoints.Len()) + require.Equal(t, 1, snap.ConnectProxy.UpstreamPeerTrustBundles.Len()) require.True(t, snap.ConnectProxy.IntentionsSet) require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions) @@ -1066,8 +1066,8 @@ func TestState_WatchesAndUpdates(t *testing.T) { require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks) require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints) - require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len()) - require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len()) + require.Equal(t, 1, snap.ConnectProxy.PeerUpstreamEndpoints.Len()) + require.Equal(t, 1, snap.ConnectProxy.UpstreamPeerTrustBundles.Len()) require.True(t, snap.ConnectProxy.IntentionsSet) require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions) @@ -1099,8 +1099,8 @@ func TestState_WatchesAndUpdates(t *testing.T) { require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks) require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints) - require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len()) - require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len()) + require.Equal(t, 1, snap.ConnectProxy.PeerUpstreamEndpoints.Len()) + require.Equal(t, 1, snap.ConnectProxy.UpstreamPeerTrustBundles.Len()) require.True(t, snap.ConnectProxy.IntentionsSet) require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions) diff --git a/agent/proxycfg/terminating_gateway.go b/agent/proxycfg/terminating_gateway.go index a465808390ad..61b0417d909d 100644 --- a/agent/proxycfg/terminating_gateway.go +++ b/agent/proxycfg/terminating_gateway.go @@ -126,7 +126,7 @@ func (s *handlerTerminatingGateway) handleUpdate(ctx context.Context, u UpdateEv } // Watch the health endpoint to discover endpoints for the service - if _, ok := snap.TerminatingGateway.WatchedServices[svc.Service]; !ok && !(svc.ServiceKind == structs.GatewayServiceKindDestination) { + if _, ok := snap.TerminatingGateway.WatchedServices[svc.Service]; !ok && (svc.ServiceKind != structs.GatewayServiceKindDestination) { ctx, cancel := context.WithCancel(ctx) err := s.dataSources.Health.Notify(ctx, &structs.ServiceSpecificRequest{ @@ -178,8 +178,8 @@ func (s *handlerTerminatingGateway) handleUpdate(ctx 
context.Context, u UpdateEv err := s.dataSources.TrustBundleList.Notify(ctx, &cachetype.TrustBundleListRequest{ Request: &pbpeering.TrustBundleListByServiceRequest{ ServiceName: svc.Service.Name, - Namespace: svc.Service.EnterpriseMeta.NamespaceOrDefault(), - Partition: svc.Service.EnterpriseMeta.PartitionOrDefault(), + Namespace: svc.Service.NamespaceOrDefault(), + Partition: svc.Service.PartitionOrDefault(), }, QueryOptions: structs.QueryOptions{Token: s.token}, }, peerTrustBundleIDPrefix+svc.Service.String(), s.ch) @@ -241,7 +241,7 @@ func (s *handlerTerminatingGateway) handleUpdate(ctx context.Context, u UpdateEv // Watch service resolvers for the service // These are used to create clusters and endpoints for the service subsets - if _, ok := snap.TerminatingGateway.WatchedResolvers[svc.Service]; !ok && !(svc.ServiceKind == structs.GatewayServiceKindDestination) { + if _, ok := snap.TerminatingGateway.WatchedResolvers[svc.Service]; !ok && (svc.ServiceKind != structs.GatewayServiceKindDestination) { ctx, cancel := context.WithCancel(ctx) err := s.dataSources.ConfigEntry.Notify(ctx, &structs.ConfigEntryQuery{ diff --git a/agent/proxycfg/testing_mesh_gateway.go b/agent/proxycfg/testing_mesh_gateway.go index 32720a601ea0..cbffc0476f38 100644 --- a/agent/proxycfg/testing_mesh_gateway.go +++ b/agent/proxycfg/testing_mesh_gateway.go @@ -425,7 +425,7 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st structs.ServiceAddress{Address: "10.0.1.3", Port: 8443}, structs.ServiceAddress{Address: "198.18.1.3", Port: 443}, ) - svc.RaftIndex.ModifyIndex = math.MaxUint64 + svc.ModifyIndex = math.MaxUint64 dc2Nodes = structs.CheckServiceNodes{ { @@ -442,7 +442,7 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st structs.ServiceAddress{Address: "10.0.1.3", Port: 8443}, structs.ServiceAddress{Address: "198.18.1.3", Port: 443}, ) - svc.RaftIndex.ModifyIndex = 0 + svc.ModifyIndex = 0 dc2Nodes = structs.CheckServiceNodes{ { diff --git a/agent/remote_exec.go b/agent/remote_exec.go index 876c1898620c..3aa25f417f08 100644 --- a/agent/remote_exec.go +++ b/agent/remote_exec.go @@ -262,9 +262,9 @@ QUERY: } if len(out.Entries) == 0 { // If the initial read was stale and had no data, retry as a consistent read - if get.QueryOptions.AllowStale { + if get.AllowStale { a.logger.Debug("trying consistent fetch of remote exec job spec") - get.QueryOptions.AllowStale = false + get.AllowStale = false goto QUERY } else { a.logger.Debug("remote exec aborted, job spec missing") diff --git a/agent/rpc/middleware/interceptors_test.go b/agent/rpc/middleware/interceptors_test.go index a8e07c8d4d2b..596c6cc88354 100644 --- a/agent/rpc/middleware/interceptors_test.go +++ b/agent/rpc/middleware/interceptors_test.go @@ -268,7 +268,7 @@ func TestRequestRecorder(t *testing.T) { o := store.get(key) require.Equal(t, o.key, metricRPCRequest) - require.LessOrEqual(t, o.elapsed, float32(time.Now().Sub(start).Microseconds())/1000) + require.LessOrEqual(t, o.elapsed, float32(time.Since(start).Microseconds())/1000) require.Equal(t, o.labels, tc.expectedLabels) }) diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go index 2c6655be6682..d49044f5edd5 100644 --- a/agent/rpc/peering/service.go +++ b/agent/rpc/peering/service.go @@ -205,7 +205,7 @@ func (s *Server) GenerateToken( ctx context.Context, req *pbpeering.GenerateTokenRequest, ) (*pbpeering.GenerateTokenResponse, error) { - if !s.Config.PeeringEnabled { + if !s.PeeringEnabled { return nil, 
peeringNotEnabledErr } @@ -338,7 +338,7 @@ func (s *Server) GenerateToken( Remote: structs.PeeringTokenRemote{ Partition: req.PartitionOrDefault(), Datacenter: s.Datacenter, - Locality: s.Config.Locality, + Locality: s.Locality, }, } @@ -357,7 +357,7 @@ func (s *Server) Establish( ctx context.Context, req *pbpeering.EstablishRequest, ) (*pbpeering.EstablishResponse, error) { - if !s.Config.PeeringEnabled { + if !s.PeeringEnabled { return nil, peeringNotEnabledErr } @@ -606,7 +606,7 @@ func retryExchange(ctx context.Context, req *pbpeerstream.ExchangeSecretRequest, // storage Index, which does not include the hydrated state from reconcilePeering, including // the Active state and the count of imported/exported services. func (s *Server) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequest) (*pbpeering.PeeringReadResponse, error) { - if !s.Config.PeeringEnabled { + if !s.PeeringEnabled { return nil, peeringNotEnabledErr } @@ -684,7 +684,7 @@ func (s *Server) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequ // storage Index, which does not include the hydrated state from reconcilePeering, including // the Active state and the count of imported/exported services. func (s *Server) PeeringList(ctx context.Context, req *pbpeering.PeeringListRequest) (*pbpeering.PeeringListResponse, error) { - if !s.Config.PeeringEnabled { + if !s.PeeringEnabled { return nil, peeringNotEnabledErr } @@ -811,7 +811,7 @@ func (s *Server) reconcilePeering(peering *pbpeering.Peering) *pbpeering.Peering // TODO(peering): As of writing, this method is only used in tests to set up Peerings in the state store. // Consider removing if we can find another way to populate state store in peering_endpoint_test.go func (s *Server) PeeringWrite(ctx context.Context, req *pbpeering.PeeringWriteRequest) (*pbpeering.PeeringWriteResponse, error) { - if !s.Config.PeeringEnabled { + if !s.PeeringEnabled { return nil, peeringNotEnabledErr } @@ -875,7 +875,7 @@ func (s *Server) PeeringWrite(ctx context.Context, req *pbpeering.PeeringWriteRe } func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDeleteRequest) (*pbpeering.PeeringDeleteResponse, error) { - if !s.Config.PeeringEnabled { + if !s.PeeringEnabled { return nil, peeringNotEnabledErr } @@ -954,7 +954,7 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete } func (s *Server) TrustBundleRead(ctx context.Context, req *pbpeering.TrustBundleReadRequest) (*pbpeering.TrustBundleReadResponse, error) { - if !s.Config.PeeringEnabled { + if !s.PeeringEnabled { return nil, peeringNotEnabledErr } @@ -1032,7 +1032,7 @@ func (s *Server) TrustBundleRead(ctx context.Context, req *pbpeering.TrustBundle // TODO(peering): rename rpc & request/response to drop the "service" part func (s *Server) TrustBundleListByService(ctx context.Context, req *pbpeering.TrustBundleListByServiceRequest) (*pbpeering.TrustBundleListByServiceResponse, error) { - if !s.Config.PeeringEnabled { + if !s.PeeringEnabled { return nil, peeringNotEnabledErr } diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index f385a11c28cf..2fd8464df49d 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -1909,8 +1909,8 @@ type testingServer struct { func newConfig(t *testing.T, dc, agentType string) resolver.Config { n := t.Name() - s := strings.Replace(n, "/", "", -1) - s = strings.Replace(s, "_", "", -1) + s := strings.ReplaceAll(n, "/", "") + s = strings.ReplaceAll(s, "_", "") return 
resolver.Config{ Datacenter: dc, AgentType: agentType, diff --git a/agent/rpcclient/configentry/configentry.go b/agent/rpcclient/configentry/configentry.go index 2b38455beb07..017123315067 100644 --- a/agent/rpcclient/configentry/configentry.go +++ b/agent/rpcclient/configentry/configentry.go @@ -47,7 +47,7 @@ func (c *Client) GetConfigEntry( ctx context.Context, req *structs.ConfigEntryQuery, ) (structs.ConfigEntryResponse, cache.ResultMeta, error) { - if c.UseStreamingBackend && (req.QueryOptions.UseCache || req.QueryOptions.MinQueryIndex > 0) { + if c.UseStreamingBackend && (req.UseCache || req.MinQueryIndex > 0) { c.QueryOptionDefaults(&req.QueryOptions) cfgReq, err := c.newConfigEntryRequest(req) if err != nil { @@ -66,7 +66,7 @@ func (c *Client) GetConfigEntry( return out, md, err } - if req.QueryOptions.AllowStale && req.QueryOptions.MaxStaleDuration > 0 && out.LastContact > req.MaxStaleDuration { + if req.AllowStale && req.MaxStaleDuration > 0 && out.LastContact > req.MaxStaleDuration { req.AllowStale = false err := c.NetRPC.RPC(ctx, "ConfigEntry.Get", &req, &out) return out, cache.ResultMeta{}, err @@ -80,7 +80,7 @@ func (c *Client) getConfigEntryRPC( req *structs.ConfigEntryQuery, ) (structs.ConfigEntryResponse, cache.ResultMeta, error) { var out structs.ConfigEntryResponse - if !req.QueryOptions.UseCache { + if !req.UseCache { err := c.NetRPC.RPC(context.Background(), "ConfigEntry.Get", req, &out) return out, cache.ResultMeta{}, err } @@ -140,7 +140,7 @@ func (r *configEntryRequest) Request(index uint64) *pbsubscribe.SubscribeRequest Topic: r.Topic, Index: index, Datacenter: r.req.Datacenter, - Token: r.req.QueryOptions.Token, + Token: r.req.Token, } if name := r.req.Name; name == "" { diff --git a/agent/rpcclient/configentry/view_test.go b/agent/rpcclient/configentry/view_test.go index 0209c898cafe..b2453913623b 100644 --- a/agent/rpcclient/configentry/view_test.go +++ b/agent/rpcclient/configentry/view_test.go @@ -25,7 +25,7 @@ func TestConfigEntryView(t *testing.T) { resp, ok := result.(*structs.ConfigEntryResponse) require.Truef(t, ok, "expected ConfigEntryResponse, got: %T", result) require.Nil(t, resp.Entry) - require.Equal(t, index, resp.QueryMeta.Index) + require.Equal(t, index, resp.Index) }) testutil.RunStep(t, "upsert event", func(t *testing.T) { @@ -110,7 +110,7 @@ func TestConfigEntryListView(t *testing.T) { resp, ok := result.(*structs.IndexedConfigEntries) require.Truef(t, ok, "expected IndexedConfigEntries, got: %T", result) require.Empty(t, resp.Entries) - require.Equal(t, index, resp.QueryMeta.Index) + require.Equal(t, index, resp.Index) }) testutil.RunStep(t, "upsert events", func(t *testing.T) { diff --git a/agent/rpcclient/health/health.go b/agent/rpcclient/health/health.go index c20daa2e82a3..7e1dd53e0b4b 100644 --- a/agent/rpcclient/health/health.go +++ b/agent/rpcclient/health/health.go @@ -36,7 +36,7 @@ func (c *Client) ServiceNodes( ) (structs.IndexedCheckServiceNodes, cache.ResultMeta, error) { // Note: if MergeCentralConfig is requested, default to using the RPC backend for now // as the streaming backend and materializer does not have support for merging yet. 
- if c.useStreaming(req) && (req.QueryOptions.UseCache || req.QueryOptions.MinQueryIndex > 0) && !req.MergeCentralConfig { + if c.useStreaming(req) && (req.UseCache || req.MinQueryIndex > 0) && !req.MergeCentralConfig { c.QueryOptionDefaults(&req.QueryOptions) result, err := c.ViewStore.Get(ctx, c.newServiceRequest(req)) @@ -53,7 +53,7 @@ func (c *Client) ServiceNodes( } // TODO: DNSServer emitted a metric here, do we still need it? - if req.QueryOptions.AllowStale && req.QueryOptions.MaxStaleDuration > 0 && out.QueryMeta.LastContact > req.MaxStaleDuration { + if req.AllowStale && req.MaxStaleDuration > 0 && out.LastContact > req.MaxStaleDuration { req.AllowStale = false err := c.NetRPC.RPC(context.Background(), "Health.ServiceNodes", &req, &out) return out, cache.ResultMeta{}, err @@ -67,7 +67,7 @@ func (c *Client) getServiceNodes( req structs.ServiceSpecificRequest, ) (structs.IndexedCheckServiceNodes, cache.ResultMeta, error) { var out structs.IndexedCheckServiceNodes - if !req.QueryOptions.UseCache { + if !req.UseCache { err := c.NetRPC.RPC(context.Background(), "Health.ServiceNodes", &req, &out) return out, cache.ResultMeta{}, err } diff --git a/agent/rpcclient/health/view.go b/agent/rpcclient/health/view.go index 2f5b43a4d0cc..10842e4b6279 100644 --- a/agent/rpcclient/health/view.go +++ b/agent/rpcclient/health/view.go @@ -24,8 +24,8 @@ func NewMaterializerRequest(srvReq structs.ServiceSpecificRequest) func(index ui Subject: &pbsubscribe.SubscribeRequest_NamedSubject{ NamedSubject: &pbsubscribe.NamedSubject{ Key: srvReq.ServiceName, - Namespace: srvReq.EnterpriseMeta.NamespaceOrEmpty(), - Partition: srvReq.EnterpriseMeta.PartitionOrEmpty(), + Namespace: srvReq.NamespaceOrEmpty(), + Partition: srvReq.PartitionOrEmpty(), PeerName: srvReq.PeerName, }, }, diff --git a/agent/rpcclient/health/view_test.go b/agent/rpcclient/health/view_test.go index 83eba5ab41a0..609af2a023b9 100644 --- a/agent/rpcclient/health/view_test.go +++ b/agent/rpcclient/health/view_test.go @@ -129,23 +129,23 @@ func testHealthView_IntegrationWithStore_WithEmptySnapshot(t *testing.T, peerNam require.Equal(t, uint64(1), result.Index) require.Equal(t, empty, result.Value) - req.QueryOptions.MinQueryIndex = result.Index + req.MinQueryIndex = result.Index }) testutil.RunStep(t, "blocks for timeout", func(t *testing.T) { // Subsequent fetch should block for the timeout start := time.Now() - req.QueryOptions.MaxQueryTime = 200 * time.Millisecond + req.MaxQueryTime = 200 * time.Millisecond result, err := store.Get(ctx, req) require.NoError(t, err) elapsed := time.Since(start) require.True(t, elapsed >= 200*time.Millisecond, "Fetch should have blocked until timeout") - require.Equal(t, req.QueryOptions.MinQueryIndex, result.Index, "result index should not have changed") + require.Equal(t, req.MinQueryIndex, result.Index, "result index should not have changed") require.Equal(t, empty, result.Value, "result value should not have changed") - req.QueryOptions.MinQueryIndex = result.Index + req.MinQueryIndex = result.Index }) var lastResultValue structs.CheckServiceNodes @@ -159,7 +159,7 @@ func testHealthView_IntegrationWithStore_WithEmptySnapshot(t *testing.T, peerNam streamClient.QueueEvents(newEventServiceHealthRegister(4, 1, "web", peerName)) }() - req.QueryOptions.MaxQueryTime = time.Second + req.MaxQueryTime = time.Second result, err := store.Get(ctx, req) require.NoError(t, err) elapsed := time.Since(start) @@ -176,7 +176,7 @@ func testHealthView_IntegrationWithStore_WithEmptySnapshot(t *testing.T, peerNam 
require.Equal(t, peerName, lastResultValue[0].Node.PeerName) require.Equal(t, peerName, lastResultValue[0].Service.PeerName) - req.QueryOptions.MinQueryIndex = result.Index + req.MinQueryIndex = result.Index }) testutil.RunStep(t, "reconnects and resumes after temporary error", func(t *testing.T) { @@ -185,25 +185,25 @@ func testHealthView_IntegrationWithStore_WithEmptySnapshot(t *testing.T, peerNam // Next fetch will continue to block until timeout and receive the same // result. start := time.Now() - req.QueryOptions.MaxQueryTime = 200 * time.Millisecond + req.MaxQueryTime = 200 * time.Millisecond result, err := store.Get(ctx, req) require.NoError(t, err) elapsed := time.Since(start) require.True(t, elapsed >= 200*time.Millisecond, "Fetch should have blocked until timeout") - require.Equal(t, req.QueryOptions.MinQueryIndex, result.Index, + require.Equal(t, req.MinQueryIndex, result.Index, "result index should not have changed") require.Equal(t, lastResultValue, result.Value.(*structs.IndexedCheckServiceNodes).Nodes, "result value should not have changed") - req.QueryOptions.MinQueryIndex = result.Index + req.MinQueryIndex = result.Index // But an update should still be noticed due to reconnection streamClient.QueueEvents(newEventServiceHealthRegister(10, 2, "web", peerName)) start = time.Now() - req.QueryOptions.MaxQueryTime = time.Second + req.MaxQueryTime = time.Second result, err = store.Get(ctx, req) require.NoError(t, err) elapsed = time.Since(start) @@ -220,7 +220,7 @@ func testHealthView_IntegrationWithStore_WithEmptySnapshot(t *testing.T, peerNam require.Equal(t, peerName, lastResultValue[1].Node.PeerName) require.Equal(t, peerName, lastResultValue[1].Service.PeerName) - req.QueryOptions.MinQueryIndex = result.Index + req.MinQueryIndex = result.Index }) testutil.RunStep(t, "returns non-temporary error to watchers", func(t *testing.T) { @@ -232,7 +232,7 @@ func testHealthView_IntegrationWithStore_WithEmptySnapshot(t *testing.T, peerNam // Next fetch should return the error start := time.Now() - req.QueryOptions.MaxQueryTime = time.Second + req.MaxQueryTime = time.Second result, err := store.Get(ctx, req) require.Error(t, err) elapsed := time.Since(start) @@ -241,21 +241,21 @@ func testHealthView_IntegrationWithStore_WithEmptySnapshot(t *testing.T, peerNam require.True(t, elapsed < time.Second, "Fetch should have returned before the timeout") - require.Equal(t, req.QueryOptions.MinQueryIndex, result.Index, "result index should not have changed") + require.Equal(t, req.MinQueryIndex, result.Index, "result index should not have changed") require.Equal(t, lastResultValue, result.Value.(*structs.IndexedCheckServiceNodes).Nodes) - req.QueryOptions.MinQueryIndex = result.Index + req.MinQueryIndex = result.Index // But an update should still be noticed due to reconnection - streamClient.QueueEvents(newEventServiceHealthRegister(req.QueryOptions.MinQueryIndex+5, 3, "web", peerName)) + streamClient.QueueEvents(newEventServiceHealthRegister(req.MinQueryIndex+5, 3, "web", peerName)) - req.QueryOptions.MaxQueryTime = time.Second + req.MaxQueryTime = time.Second result, err = store.Get(ctx, req) require.NoError(t, err) elapsed = time.Since(start) require.True(t, elapsed < time.Second, "Fetch should have returned before the timeout") - require.Equal(t, req.QueryOptions.MinQueryIndex+5, result.Index, "result index should not have changed") + require.Equal(t, req.MinQueryIndex+5, result.Index, "result index should not have changed") lastResultValue = 
result.Value.(*structs.IndexedCheckServiceNodes).Nodes require.Len(t, lastResultValue, 3, "result value should contain the new registration") @@ -267,7 +267,7 @@ func testHealthView_IntegrationWithStore_WithEmptySnapshot(t *testing.T, peerNam require.Equal(t, peerName, lastResultValue[2].Node.PeerName) require.Equal(t, peerName, lastResultValue[2].Service.PeerName) - req.QueryOptions.MinQueryIndex = result.Index + req.MinQueryIndex = result.Index }) } @@ -336,7 +336,7 @@ func testHealthView_IntegrationWithStore_WithFullSnapshot(t *testing.T, peerName expected.Index = 5 prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) - req.QueryOptions.MinQueryIndex = result.Index + req.MinQueryIndex = result.Index }) testutil.RunStep(t, "blocks until deregistration", func(t *testing.T) { @@ -350,7 +350,7 @@ func testHealthView_IntegrationWithStore_WithFullSnapshot(t *testing.T, peerName client.QueueEvents(newEventServiceHealthDeregister(20, 1, "web", peerName)) }() - req.QueryOptions.MaxQueryTime = time.Second + req.MaxQueryTime = time.Second result, err := store.Get(ctx, req) require.NoError(t, err) elapsed := time.Since(start) @@ -364,7 +364,7 @@ func testHealthView_IntegrationWithStore_WithFullSnapshot(t *testing.T, peerName expected.Index = 20 prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) - req.QueryOptions.MinQueryIndex = result.Index + req.MinQueryIndex = result.Index }) testutil.RunStep(t, "server reload is respected", func(t *testing.T) { @@ -382,7 +382,7 @@ func testHealthView_IntegrationWithStore_WithFullSnapshot(t *testing.T, peerName // Make another blocking query with THE SAME index. It should immediately // return the new snapshot. start := time.Now() - req.QueryOptions.MaxQueryTime = time.Second + req.MaxQueryTime = time.Second result, err := store.Get(ctx, req) require.NoError(t, err) elapsed := time.Since(start) @@ -394,7 +394,7 @@ func testHealthView_IntegrationWithStore_WithFullSnapshot(t *testing.T, peerName expected.Index = 50 prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) - req.QueryOptions.MinQueryIndex = result.Index + req.MinQueryIndex = result.Index }) testutil.RunStep(t, "reconnects and receives new snapshot when server state has changed", func(t *testing.T) { @@ -408,8 +408,8 @@ func testHealthView_IntegrationWithStore_WithFullSnapshot(t *testing.T, peerName newEndOfSnapshotEvent(50)) start := time.Now() - req.QueryOptions.MinQueryIndex = 49 - req.QueryOptions.MaxQueryTime = time.Second + req.MinQueryIndex = 49 + req.MaxQueryTime = time.Second result, err := store.Get(ctx, req) require.NoError(t, err) elapsed := time.Since(start) @@ -425,7 +425,7 @@ func testHealthView_IntegrationWithStore_WithFullSnapshot(t *testing.T, peerName func newExpectedNodesInPeer(peerName string, nodes ...string) *structs.IndexedCheckServiceNodes { result := &structs.IndexedCheckServiceNodes{} - result.QueryMeta.Backend = structs.QueryBackendStreaming + result.Backend = structs.QueryBackendStreaming for _, node := range nodes { result.Nodes = append(result.Nodes, structs.CheckServiceNode{ Node: &structs.Node{ @@ -495,7 +495,7 @@ func testHealthView_IntegrationWithStore_EventBatches(t *testing.T, peerName str expected := newExpectedNodesInPeer(peerName, "node1", "node2", "node3") expected.Index = 5 prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) - req.QueryOptions.MinQueryIndex = result.Index + req.MinQueryIndex = result.Index }) testutil.RunStep(t, "batched updates work too", 
func(t *testing.T) { @@ -508,7 +508,7 @@ func testHealthView_IntegrationWithStore_EventBatches(t *testing.T, peerName str newEventServiceHealthRegister(20, 4, "web", peerName), ) client.QueueEvents(batchEv) - req.QueryOptions.MaxQueryTime = time.Second + req.MaxQueryTime = time.Second result, err := store.Get(ctx, req) require.NoError(t, err) @@ -517,7 +517,7 @@ func testHealthView_IntegrationWithStore_EventBatches(t *testing.T, peerName str expected.Index = 20 prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) - req.QueryOptions.MinQueryIndex = result.Index + req.MinQueryIndex = result.Index }) } @@ -575,7 +575,7 @@ func testHealthView_IntegrationWithStore_Filtering(t *testing.T, peerName string expected.Index = 5 prototest.AssertDeepEqual(t, expected, result.Value, cmpCheckServiceNodeNames) - req.QueryOptions.MinQueryIndex = result.Index + req.MinQueryIndex = result.Index }) testutil.RunStep(t, "filtered updates work too", func(t *testing.T) { diff --git a/agent/service_manager.go b/agent/service_manager.go index 355c73eb2c41..5f0896cc1bbb 100644 --- a/agent/service_manager.go +++ b/agent/service_manager.go @@ -333,10 +333,10 @@ func makeConfigRequest(bd BaseDeps, addReq AddServiceRequest) *structs.ServiceCo if us.DestinationType == "" || us.DestinationType == structs.UpstreamDestTypeService { psn := us.DestinationID() if psn.Peer == "" { - psn.ServiceName.EnterpriseMeta.Merge(&ns.EnterpriseMeta) + psn.ServiceName.Merge(&ns.EnterpriseMeta) } else { // Peer services should not have their namespace overwritten. - psn.ServiceName.EnterpriseMeta.OverridePartition(ns.EnterpriseMeta.PartitionOrDefault()) + psn.ServiceName.OverridePartition(ns.PartitionOrDefault()) } upstreams = append(upstreams, psn) } @@ -352,8 +352,8 @@ func makeConfigRequest(bd BaseDeps, addReq AddServiceRequest) *structs.ServiceCo UpstreamServiceNames: upstreams, EnterpriseMeta: ns.EnterpriseMeta, } - if req.QueryOptions.Token == "" { - req.QueryOptions.Token = bd.Tokens.AgentToken() + if req.Token == "" { + req.Token = bd.Tokens.AgentToken() } return req } diff --git a/agent/structs/acl.go b/agent/structs/acl.go index e4ced5e6c16a..aff5ca314ee5 100644 --- a/agent/structs/acl.go +++ b/agent/structs/acl.go @@ -534,7 +534,7 @@ func (t *ACLToken) SetHash(force bool) []byte { templatedPolicy.AddToHash(hash) } - t.EnterpriseMeta.AddToHash(hash, false) + t.AddToHash(hash, false) // Finalize the hash hashVal := hash.Sum(nil) @@ -736,7 +736,7 @@ func (p *ACLPolicy) SetHash(force bool) []byte { hash.Write([]byte(dc)) } - p.EnterpriseMeta.AddToHash(hash, false) + p.AddToHash(hash, false) // Finalize the hash hashVal := hash.Sum(nil) @@ -992,7 +992,7 @@ func (r *ACLRole) SetHash(force bool) []byte { templatedPolicy.AddToHash(hash) } - r.EnterpriseMeta.AddToHash(hash, false) + r.AddToHash(hash, false) // Finalize the hash hashVal := hash.Sum(nil) diff --git a/agent/structs/acl_templated_policy.go b/agent/structs/acl_templated_policy.go index 076d6ae256e8..cde67e0df108 100644 --- a/agent/structs/acl_templated_policy.go +++ b/agent/structs/acl_templated_policy.go @@ -238,7 +238,7 @@ func (tp *ACLTemplatedPolicy) SyntheticPolicy(entMeta *acl.EnterpriseMeta) (*ACL Datacenters: tp.Datacenters, Description: fmt.Sprintf("synthetic policy generated from templated policy: %s", tp.TemplateName), } - policy.EnterpriseMeta.Merge(entMeta) + policy.Merge(entMeta) policy.SetHash(true) return policy, nil diff --git a/agent/structs/aclfilter/filter.go b/agent/structs/aclfilter/filter.go index d59bf3c9c403..13728b7f6ded 
100644 --- a/agent/structs/aclfilter/filter.go +++ b/agent/structs/aclfilter/filter.go @@ -39,64 +39,64 @@ func (f *Filter) Filter(subject any) { f.filterCheckServiceNodes(v) case *structs.IndexedCheckServiceNodes: - v.QueryMeta.ResultsFilteredByACLs = f.filterCheckServiceNodes(&v.Nodes) + v.ResultsFilteredByACLs = f.filterCheckServiceNodes(&v.Nodes) case *structs.PreparedQueryExecuteResponse: - v.QueryMeta.ResultsFilteredByACLs = f.filterCheckServiceNodes(&v.Nodes) + v.ResultsFilteredByACLs = f.filterCheckServiceNodes(&v.Nodes) case *structs.IndexedServiceTopology: filtered := f.filterServiceTopology(v.ServiceTopology) if filtered { v.FilteredByACLs = true - v.QueryMeta.ResultsFilteredByACLs = true + v.ResultsFilteredByACLs = true } case *structs.DatacenterIndexedCheckServiceNodes: - v.QueryMeta.ResultsFilteredByACLs = f.filterDatacenterCheckServiceNodes(&v.DatacenterNodes) + v.ResultsFilteredByACLs = f.filterDatacenterCheckServiceNodes(&v.DatacenterNodes) case *structs.IndexedCoordinates: - v.QueryMeta.ResultsFilteredByACLs = f.filterCoordinates(&v.Coordinates) + v.ResultsFilteredByACLs = f.filterCoordinates(&v.Coordinates) case *structs.IndexedHealthChecks: - v.QueryMeta.ResultsFilteredByACLs = f.filterHealthChecks(&v.HealthChecks) + v.ResultsFilteredByACLs = f.filterHealthChecks(&v.HealthChecks) case *structs.IndexedIntentions: - v.QueryMeta.ResultsFilteredByACLs = f.filterIntentions(&v.Intentions) + v.ResultsFilteredByACLs = f.filterIntentions(&v.Intentions) case *structs.IntentionQueryMatch: f.filterIntentionMatch(v) case *structs.IndexedNodeDump: if f.filterNodeDump(&v.Dump) { - v.QueryMeta.ResultsFilteredByACLs = true + v.ResultsFilteredByACLs = true } if f.filterNodeDump(&v.ImportedDump) { - v.QueryMeta.ResultsFilteredByACLs = true + v.ResultsFilteredByACLs = true } case *structs.IndexedServiceDump: - v.QueryMeta.ResultsFilteredByACLs = f.filterServiceDump(&v.Dump) + v.ResultsFilteredByACLs = f.filterServiceDump(&v.Dump) case *structs.IndexedNodes: - v.QueryMeta.ResultsFilteredByACLs = f.filterNodes(&v.Nodes) + v.ResultsFilteredByACLs = f.filterNodes(&v.Nodes) case *structs.IndexedNodeServices: - v.QueryMeta.ResultsFilteredByACLs = f.filterNodeServices(&v.NodeServices) + v.ResultsFilteredByACLs = f.filterNodeServices(&v.NodeServices) case *structs.IndexedNodeServiceList: - v.QueryMeta.ResultsFilteredByACLs = f.filterNodeServiceList(&v.NodeServices) + v.ResultsFilteredByACLs = f.filterNodeServiceList(&v.NodeServices) case *structs.IndexedServiceNodes: - v.QueryMeta.ResultsFilteredByACLs = f.filterServiceNodes(&v.ServiceNodes) + v.ResultsFilteredByACLs = f.filterServiceNodes(&v.ServiceNodes) case *structs.IndexedServices: - v.QueryMeta.ResultsFilteredByACLs = f.filterServices(v.Services, &v.EnterpriseMeta) + v.ResultsFilteredByACLs = f.filterServices(v.Services, &v.EnterpriseMeta) case *structs.IndexedSessions: - v.QueryMeta.ResultsFilteredByACLs = f.filterSessions(&v.Sessions) + v.ResultsFilteredByACLs = f.filterSessions(&v.Sessions) case *structs.IndexedPreparedQueries: - v.QueryMeta.ResultsFilteredByACLs = f.filterPreparedQueries(&v.Queries) + v.ResultsFilteredByACLs = f.filterPreparedQueries(&v.Queries) case **structs.PreparedQuery: f.redactPreparedQueryTokens(v) @@ -131,11 +131,11 @@ func (f *Filter) Filter(subject any) { f.filterAuthMethod(v) case *structs.IndexedServiceList: - v.QueryMeta.ResultsFilteredByACLs = f.filterServiceList(&v.Services) + v.ResultsFilteredByACLs = f.filterServiceList(&v.Services) case *structs.IndexedExportedServiceList: for peer, peerServices := 
range v.Services { - v.QueryMeta.ResultsFilteredByACLs = f.filterServiceList(&peerServices) + v.ResultsFilteredByACLs = f.filterServiceList(&peerServices) if len(peerServices) == 0 { delete(v.Services, peer) } else { @@ -144,17 +144,17 @@ func (f *Filter) Filter(subject any) { } case *structs.IndexedGatewayServices: - v.QueryMeta.ResultsFilteredByACLs = f.filterGatewayServices(&v.Services) + v.ResultsFilteredByACLs = f.filterGatewayServices(&v.Services) case *structs.IndexedNodesWithGateways: if f.filterCheckServiceNodes(&v.Nodes) { - v.QueryMeta.ResultsFilteredByACLs = true + v.ResultsFilteredByACLs = true } if f.filterGatewayServices(&v.Gateways) { - v.QueryMeta.ResultsFilteredByACLs = true + v.ResultsFilteredByACLs = true } if f.filterCheckServiceNodes(&v.ImportedNodes) { - v.QueryMeta.ResultsFilteredByACLs = true + v.ResultsFilteredByACLs = true } default: diff --git a/agent/structs/aclfilter/filter_test.go b/agent/structs/aclfilter/filter_test.go index 98f3bb63f291..4c898d2f739b 100644 --- a/agent/structs/aclfilter/filter_test.go +++ b/agent/structs/aclfilter/filter_test.go @@ -527,7 +527,7 @@ func TestACL_filterHealthChecks(t *testing.T) { New(authz, logger).Filter(list) require.Len(t, list.HealthChecks, 1) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("allowed to read the service, but not the node", func(t *testing.T) { @@ -549,7 +549,7 @@ func TestACL_filterHealthChecks(t *testing.T) { New(authz, logger).Filter(list) require.Empty(t, list.HealthChecks) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("allowed to read the node, but not the service", func(t *testing.T) { @@ -571,7 +571,7 @@ func TestACL_filterHealthChecks(t *testing.T) { New(authz, logger).Filter(list) require.Empty(t, list.HealthChecks) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("denied", func(t *testing.T) { @@ -580,7 +580,7 @@ func TestACL_filterHealthChecks(t *testing.T) { New(acl.DenyAll(), logger).Filter(list) require.Empty(t, list.HealthChecks) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) } @@ -610,7 +610,7 @@ func TestACL_filterIntentions(t *testing.T) { New(acl.AllowAll(), logger).Filter(list) require.Len(t, list.Intentions, 2) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("allowed to read 1", func(t *testing.T) { @@ -632,7 +632,7 @@ func TestACL_filterIntentions(t *testing.T) { New(authz, logger).Filter(list) require.Len(t, list.Intentions, 1) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("denied", func(t *testing.T) { @@ -641,7 +641,7 @@ func TestACL_filterIntentions(t *testing.T) { New(acl.DenyAll(), logger).Filter(list) require.Empty(t, list.Intentions) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") 
+ require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) } @@ -703,7 +703,7 @@ func TestACL_filterServiceNodes(t *testing.T) { New(authz, logger).Filter(list) require.Len(t, list.ServiceNodes, 1) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("allowed to read the service, but not the node", func(t *testing.T) { @@ -722,7 +722,7 @@ func TestACL_filterServiceNodes(t *testing.T) { New(authz, logger).Filter(list) require.Empty(t, list.ServiceNodes) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("denied", func(t *testing.T) { @@ -731,7 +731,7 @@ func TestACL_filterServiceNodes(t *testing.T) { New(acl.DenyAll(), logger).Filter(list) require.Empty(t, list.ServiceNodes) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) } @@ -764,7 +764,7 @@ func TestACL_filterNodeServices(t *testing.T) { New(acl.AllowAll(), logger).Filter(list) require.Nil(t, list.NodeServices) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("allowed", func(t *testing.T) { @@ -786,7 +786,7 @@ func TestACL_filterNodeServices(t *testing.T) { New(authz, logger).Filter(list) require.Len(t, list.NodeServices.Services, 1) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("allowed to read the service, but not the node", func(t *testing.T) { @@ -805,7 +805,7 @@ func TestACL_filterNodeServices(t *testing.T) { New(authz, logger).Filter(list) require.Nil(t, list.NodeServices) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("allowed to read the node, but not the service", func(t *testing.T) { @@ -824,7 +824,7 @@ func TestACL_filterNodeServices(t *testing.T) { New(authz, logger).Filter(list) require.Empty(t, list.NodeServices.Services) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("denied", func(t *testing.T) { @@ -833,7 +833,7 @@ func TestACL_filterNodeServices(t *testing.T) { New(acl.DenyAll(), logger).Filter(list) require.Nil(t, list.NodeServices) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) } @@ -861,7 +861,7 @@ func TestACL_filterNodeServiceList(t *testing.T) { New(acl.AllowAll(), logger).Filter(&list) require.Empty(t, list) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("allowed", func(t *testing.T) { @@ -883,7 +883,7 @@ func TestACL_filterNodeServiceList(t *testing.T) { New(authz, logger).Filter(list) require.Len(t, 
list.NodeServices.Services, 1) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("allowed to read the service, but not the node", func(t *testing.T) { @@ -902,7 +902,7 @@ func TestACL_filterNodeServiceList(t *testing.T) { New(authz, logger).Filter(list) require.Empty(t, list.NodeServices) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("allowed to read the node, but not the service", func(t *testing.T) { @@ -922,7 +922,7 @@ func TestACL_filterNodeServiceList(t *testing.T) { require.NotEmpty(t, list.NodeServices.Node) require.Empty(t, list.NodeServices.Services) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("denied", func(t *testing.T) { @@ -931,7 +931,7 @@ func TestACL_filterNodeServiceList(t *testing.T) { New(acl.DenyAll(), logger).Filter(list) require.Empty(t, list.NodeServices) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) } @@ -964,7 +964,7 @@ func TestACL_filterGatewayServices(t *testing.T) { New(authz, logger).Filter(list) require.Len(t, list.Services, 1) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("denied", func(t *testing.T) { @@ -973,7 +973,7 @@ func TestACL_filterGatewayServices(t *testing.T) { New(acl.DenyAll(), logger).Filter(list) require.Empty(t, list.Services) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) } @@ -1024,7 +1024,7 @@ func TestACL_filterCheckServiceNodes(t *testing.T) { New(authz, logger).Filter(list) require.Len(t, list.Nodes, 1) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("allowed to read the service, but not the node", func(t *testing.T) { @@ -1043,7 +1043,7 @@ func TestACL_filterCheckServiceNodes(t *testing.T) { New(authz, logger).Filter(list) require.Empty(t, list.Nodes) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("allowed to read the node, but not the service", func(t *testing.T) { @@ -1062,7 +1062,7 @@ func TestACL_filterCheckServiceNodes(t *testing.T) { New(authz, logger).Filter(list) require.Empty(t, list.Nodes) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("denied", func(t *testing.T) { @@ -1071,7 +1071,7 @@ func TestACL_filterCheckServiceNodes(t *testing.T) { New(acl.DenyAll(), logger).Filter(list) require.Empty(t, list.Nodes) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, 
"ResultsFilteredByACLs should be true") }) } @@ -1122,7 +1122,7 @@ func TestACL_filterPreparedQueryExecuteResponse(t *testing.T) { New(authz, logger).Filter(list) require.Len(t, list.Nodes, 1) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("allowed to read the service, but not the node", func(t *testing.T) { @@ -1141,7 +1141,7 @@ func TestACL_filterPreparedQueryExecuteResponse(t *testing.T) { New(authz, logger).Filter(list) require.Empty(t, list.Nodes) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("allowed to read the node, but not the service", func(t *testing.T) { @@ -1160,7 +1160,7 @@ func TestACL_filterPreparedQueryExecuteResponse(t *testing.T) { New(authz, logger).Filter(list) require.Empty(t, list.Nodes) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("denied", func(t *testing.T) { @@ -1169,7 +1169,7 @@ func TestACL_filterPreparedQueryExecuteResponse(t *testing.T) { New(acl.DenyAll(), logger).Filter(list) require.Empty(t, list.Nodes) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) } @@ -1353,7 +1353,7 @@ func TestACL_filterCoordinates(t *testing.T) { New(acl.AllowAll(), logger).Filter(list) require.Len(t, list.Coordinates, 2) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("allowed to read one node", func(t *testing.T) { @@ -1372,7 +1372,7 @@ func TestACL_filterCoordinates(t *testing.T) { New(authz, logger).Filter(list) require.Len(t, list.Coordinates, 1) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("denied", func(t *testing.T) { @@ -1381,7 +1381,7 @@ func TestACL_filterCoordinates(t *testing.T) { New(acl.DenyAll(), logger).Filter(list) require.Empty(t, list.Coordinates) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) } @@ -1405,7 +1405,7 @@ func TestACL_filterSessions(t *testing.T) { New(acl.AllowAll(), logger).Filter(list) require.Len(t, list.Sessions, 2) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("just one node's sessions allowed", func(t *testing.T) { @@ -1424,7 +1424,7 @@ func TestACL_filterSessions(t *testing.T) { New(authz, logger).Filter(list) require.Len(t, list.Sessions, 1) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("denied", func(t *testing.T) { @@ -1433,7 +1433,7 @@ func TestACL_filterSessions(t *testing.T) { New(acl.DenyAll(), logger).Filter(list) require.Empty(t, list.Sessions) - 
require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) } @@ -2059,7 +2059,7 @@ func TestACL_filterIndexedServiceDump(t *testing.T) { New(authz, logger).Filter(list) require.Len(t, list.Dump, 2) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("not allowed to access node", func(t *testing.T) { @@ -2082,7 +2082,7 @@ func TestACL_filterIndexedServiceDump(t *testing.T) { require.Len(t, list.Dump, 1) require.Equal(t, "bar", list.Dump[0].GatewayService.Service.Name) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("not allowed to access service", func(t *testing.T) { @@ -2104,7 +2104,7 @@ func TestACL_filterIndexedServiceDump(t *testing.T) { New(authz, logger).Filter(list) require.Empty(t, list.Dump) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("not allowed to access gateway", func(t *testing.T) { @@ -2126,7 +2126,7 @@ func TestACL_filterIndexedServiceDump(t *testing.T) { New(authz, logger).Filter(list) require.Empty(t, list.Dump) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) } @@ -2202,7 +2202,7 @@ func TestACL_filterDatacenterCheckServiceNodes(t *testing.T) { require.Len(t, list.DatacenterNodes["dc1"], 2) require.Len(t, list.DatacenterNodes["dc2"], 2) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("allowed to read the service, but not the node", func(t *testing.T) { @@ -2221,7 +2221,7 @@ func TestACL_filterDatacenterCheckServiceNodes(t *testing.T) { New(authz, logger).Filter(list) require.Empty(t, list.DatacenterNodes) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("allowed to read the node, but not the service", func(t *testing.T) { @@ -2240,7 +2240,7 @@ func TestACL_filterDatacenterCheckServiceNodes(t *testing.T) { New(authz, logger).Filter(list) require.Empty(t, list.DatacenterNodes) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("denied", func(t *testing.T) { @@ -2249,7 +2249,7 @@ func TestACL_filterDatacenterCheckServiceNodes(t *testing.T) { New(acl.DenyAll(), logger).Filter(list) require.Empty(t, list.DatacenterNodes) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) } @@ -2357,7 +2357,7 @@ func TestACL_filterPreparedQueries(t *testing.T) { // Check we get the un-redacted token. 
require.Equal(t, "root", list.Queries[2].Token) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("permissive filtering", func(t *testing.T) { @@ -2378,7 +2378,7 @@ func TestACL_filterPreparedQueries(t *testing.T) { // ResultsFilteredByACLs should not include un-named queries, which are only // readable by a management token. - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) t.Run("limited access", func(t *testing.T) { @@ -2402,7 +2402,7 @@ func TestACL_filterPreparedQueries(t *testing.T) { // Check the token is redacted. require.Equal(t, RedactedToken, list.Queries[0].Token) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("restrictive filtering", func(t *testing.T) { @@ -2411,7 +2411,7 @@ func TestACL_filterPreparedQueries(t *testing.T) { New(acl.DenyAll(), logger).Filter(list) require.Empty(t, list.Queries) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) } @@ -2432,7 +2432,7 @@ func TestACL_filterServiceList(t *testing.T) { list := makeList() New(acl.AllowAll(), logger).Filter(list) - require.False(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.False(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") require.Len(t, list.Services, 2) }) @@ -2441,7 +2441,7 @@ func TestACL_filterServiceList(t *testing.T) { list := makeList() New(acl.DenyAll(), logger).Filter(list) - require.True(t, list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.True(t, list.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") require.Empty(t, list.Services) }) } diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index 32b4e0c89de0..15cec184f6b8 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -965,7 +965,7 @@ func (r *ServiceConfigRequest) CacheInfo() cache.RequestInfo { UpstreamServiceNames: r.UpstreamServiceNames, ProxyMode: r.Mode, MeshGatewayConfig: r.MeshGateway, - Filter: r.QueryOptions.Filter, + Filter: r.Filter, }, nil) if err == nil { // If there is an error, we don't set the key. A blank key forces @@ -1083,8 +1083,8 @@ func (cfg *UpstreamConfig) normalize(named bool, entMeta *acl.EnterpriseMeta) er if named { // If the upstream namespace is omitted it inherits that of the enclosing // config entry. 
- cfg.EnterpriseMeta.MergeNoWildcard(entMeta) - cfg.EnterpriseMeta.Normalize() + cfg.MergeNoWildcard(entMeta) + cfg.Normalize() } cfg.Protocol = strings.ToLower(cfg.Protocol) @@ -1109,17 +1109,17 @@ func (cfg UpstreamConfig) validate(named bool) error { if cfg.Name == WildcardSpecifier { return fmt.Errorf("Wildcard name is not supported") } - if cfg.EnterpriseMeta.NamespaceOrDefault() == WildcardSpecifier { + if cfg.NamespaceOrDefault() == WildcardSpecifier { return fmt.Errorf("Wildcard namespace is not supported") } } else { if cfg.Name != "" { return fmt.Errorf("Name must be empty") } - if cfg.EnterpriseMeta.NamespaceOrEmpty() != "" { + if cfg.NamespaceOrEmpty() != "" { return fmt.Errorf("Namespace must be empty") } - if cfg.EnterpriseMeta.PartitionOrEmpty() != "" { + if cfg.PartitionOrEmpty() != "" { return fmt.Errorf("Partition must be empty") } } diff --git a/agent/structs/config_entry_discoverychain.go b/agent/structs/config_entry_discoverychain.go index 8b43c4f9243a..4c010ea1397e 100644 --- a/agent/structs/config_entry_discoverychain.go +++ b/agent/structs/config_entry_discoverychain.go @@ -131,10 +131,10 @@ func (e *ServiceRouterConfigEntry) Normalize() error { } if route.Destination != nil && route.Destination.Namespace == "" { - route.Destination.Namespace = e.EnterpriseMeta.NamespaceOrEmpty() + route.Destination.Namespace = e.NamespaceOrEmpty() } if route.Destination != nil && route.Destination.Partition == "" { - route.Destination.Partition = e.EnterpriseMeta.PartitionOrEmpty() + route.Destination.Partition = e.PartitionOrEmpty() } } @@ -337,7 +337,7 @@ func (e *ServiceRouterConfigEntry) ListRelatedServices() []ServiceID { out = append(out, svc) } sort.Slice(out, func(i, j int) bool { - return out[i].EnterpriseMeta.LessThan(&out[j].EnterpriseMeta) || + return out[i].LessThan(&out[j].EnterpriseMeta) || out[i].ID < out[j].ID }) return out @@ -600,7 +600,7 @@ func (e *ServiceSplitterConfigEntry) Normalize() error { if len(e.Splits) > 0 { for i, split := range e.Splits { if split.Namespace == "" { - split.Namespace = e.EnterpriseMeta.NamespaceOrDefault() + split.Namespace = e.NamespaceOrDefault() } e.Splits[i].Weight = NormalizeServiceSplitWeight(split.Weight) } @@ -720,7 +720,7 @@ func (e *ServiceSplitterConfigEntry) ListRelatedServices() []ServiceID { out = append(out, svc) } sort.Slice(out, func(i, j int) bool { - return out[i].EnterpriseMeta.LessThan(&out[j].EnterpriseMeta) || + return out[i].LessThan(&out[j].EnterpriseMeta) || out[i].ID < out[j].ID }) return out @@ -1364,7 +1364,7 @@ func (e *ServiceResolverConfigEntry) ListRelatedServices() []ServiceID { out = append(out, svc) } sort.Slice(out, func(i, j int) bool { - return out[i].EnterpriseMeta.LessThan(&out[j].EnterpriseMeta) || + return out[i].LessThan(&out[j].EnterpriseMeta) || out[i].ID < out[j].ID }) return out @@ -1764,7 +1764,7 @@ func (r *DiscoveryChainRequest) CacheInfo() cache.RequestInfo { OverrideMeshGateway: r.OverrideMeshGateway, OverrideProtocol: r.OverrideProtocol, OverrideConnectTimeout: r.OverrideConnectTimeout, - Filter: r.QueryOptions.Filter, + Filter: r.Filter, }, nil) if err == nil { // If there is an error, we don't set the key. 
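The config-entry hunks extend the same idea to methods: an embedded type's methods are promoted along with its fields, so the long and short selectors name the same call. A sketch under that assumption, with stand-in types rather than Consul's acl.EnterpriseMeta:

package main

import "fmt"

// EnterpriseMeta is an illustrative stand-in for an embedded type with
// methods; it is not the real definition or signature.
type EnterpriseMeta struct{ Namespace string }

func (m *EnterpriseMeta) NamespaceOrDefault() string {
	if m.Namespace == "" {
		return "default"
	}
	return m.Namespace
}

type UpstreamConfig struct {
	Name string
	EnterpriseMeta // methods are promoted along with fields
}

func main() {
	cfg := UpstreamConfig{Name: "web"}
	// cfg.EnterpriseMeta.NamespaceOrDefault() and cfg.NamespaceOrDefault()
	// resolve to the same method.
	fmt.Println(cfg.NamespaceOrDefault())
}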
A blank key forces diff --git a/agent/structs/config_entry_gateways.go b/agent/structs/config_entry_gateways.go index 1e538d79c973..94766671d2e1 100644 --- a/agent/structs/config_entry_gateways.go +++ b/agent/structs/config_entry_gateways.go @@ -198,8 +198,8 @@ func (e *IngressGatewayConfigEntry) Normalize() error { listener.Protocol = strings.ToLower(listener.Protocol) for i := range listener.Services { - listener.Services[i].EnterpriseMeta.Merge(&e.EnterpriseMeta) - listener.Services[i].EnterpriseMeta.Normalize() + listener.Services[i].Merge(&e.EnterpriseMeta) + listener.Services[i].Normalize() } // Make sure to set the item back into the array, since we are not using @@ -441,7 +441,7 @@ func (e *IngressGatewayConfigEntry) ListRelatedServices() []ServiceID { out = append(out, svc) } sort.Slice(out, func(i, j int) bool { - return out[i].EnterpriseMeta.LessThan(&out[j].EnterpriseMeta) || + return out[i].LessThan(&out[j].EnterpriseMeta) || out[i].ID < out[j].ID }) return out @@ -554,8 +554,8 @@ func (e *TerminatingGatewayConfigEntry) Normalize() error { e.EnterpriseMeta.Normalize() for i := range e.Services { - e.Services[i].EnterpriseMeta.Merge(&e.EnterpriseMeta) - e.Services[i].EnterpriseMeta.Normalize() + e.Services[i].Merge(&e.EnterpriseMeta) + e.Services[i].Normalize() } h, err := HashConfigEntry(e) @@ -598,7 +598,7 @@ func (e *TerminatingGatewayConfigEntry) Validate() error { // If either client cert config file was specified then the CA file, client cert, and key file must be specified // Specifying only a CAFile is allowed for one-way TLS if (svc.CertFile != "" || svc.KeyFile != "") && - !(svc.CAFile != "" && svc.CertFile != "" && svc.KeyFile != "") { + (svc.CAFile == "" || svc.CertFile == "" || svc.KeyFile == "") { return fmt.Errorf("Service %q must have a CertFile, CAFile, and KeyFile specified for TLS origination", svc.Name) } @@ -806,8 +806,8 @@ func (e *APIGatewayConfigEntry) Normalize() error { if cert.Kind == "" { cert.Kind = InlineCertificate } - cert.EnterpriseMeta.Merge(e.GetEnterpriseMeta()) - cert.EnterpriseMeta.Normalize() + cert.Merge(e.GetEnterpriseMeta()) + cert.Normalize() listener.TLS.Certificates[i] = cert } @@ -1140,14 +1140,14 @@ func (e *BoundAPIGatewayConfigEntry) GetMeta() map[string]string { return e.Meta func (e *BoundAPIGatewayConfigEntry) Normalize() error { for i, listener := range e.Listeners { for j, route := range listener.Routes { - route.EnterpriseMeta.Merge(&e.EnterpriseMeta) - route.EnterpriseMeta.Normalize() + route.Merge(&e.EnterpriseMeta) + route.Normalize() listener.Routes[j] = route } for j, cert := range listener.Certificates { - cert.EnterpriseMeta.Merge(&e.EnterpriseMeta) - cert.EnterpriseMeta.Normalize() + cert.Merge(&e.EnterpriseMeta) + cert.Normalize() listener.Certificates[j] = cert } diff --git a/agent/structs/config_entry_inline_certificate.go b/agent/structs/config_entry_inline_certificate.go index a13b1d720a24..ed57203bb3ba 100644 --- a/agent/structs/config_entry_inline_certificate.go +++ b/agent/structs/config_entry_inline_certificate.go @@ -160,9 +160,7 @@ func (e *InlineCertificateConfigEntry) Hosts() ([]string, error) { hosts := []string{certificate.Subject.CommonName} - for _, name := range certificate.DNSNames { - hosts = append(hosts, name) - } + hosts = append(hosts, certificate.DNSNames...) 
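The TerminatingGatewayConfigEntry.Validate change just above is a pure boolean rewrite: by De Morgan's law, negating "all three files are set" is the same as "at least one file is empty". A small demonstration with placeholder fields:

package main

import "fmt"

// linkedService only mimics the shape of the validated struct.
type linkedService struct{ CAFile, CertFile, KeyFile string }

func main() {
	svc := linkedService{CertFile: "client.crt"}
	before := !(svc.CAFile != "" && svc.CertFile != "" && svc.KeyFile != "")
	after := svc.CAFile == "" || svc.CertFile == "" || svc.KeyFile == ""
	// The two expressions are logically identical; the rewritten form simply
	// avoids negating a compound condition.
	fmt.Println(before == after) // true for every possible input
}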
for _, ip := range certificate.IPAddresses { hosts = append(hosts, ip.String()) diff --git a/agent/structs/config_entry_intentions.go b/agent/structs/config_entry_intentions.go index 6da3160dd610..58005f34d321 100644 --- a/agent/structs/config_entry_intentions.go +++ b/agent/structs/config_entry_intentions.go @@ -415,9 +415,7 @@ func (p *IntentionHTTPPermission) Clone() *IntentionHTTPPermission { if len(p.Header) > 0 { p2.Header = make([]IntentionHTTPHeaderPermission, 0, len(p.Header)) - for _, hdr := range p.Header { - p2.Header = append(p2.Header, hdr) - } + p2.Header = append(p2.Header, p.Header...) } p2.Methods = stringslice.CloneStringSlice(p.Methods) @@ -546,10 +544,10 @@ func (e *ServiceIntentionsConfigEntry) normalize(legacyWrite bool) error { if src.Peer != "" || src.SamenessGroup != "" { // If the source is peered or a sameness group, normalize the namespace only, // since they are mutually exclusive with partition. - src.EnterpriseMeta.NormalizeNamespace() + src.NormalizeNamespace() } else { - src.EnterpriseMeta.MergeNoWildcard(&e.EnterpriseMeta) - src.EnterpriseMeta.Normalize() + src.MergeNoWildcard(&e.EnterpriseMeta) + src.Normalize() } // Compute the precedence only AFTER normalizing namespaces since the @@ -653,7 +651,7 @@ func (e *ServiceIntentionsConfigEntry) LegacyValidate() error { } func (e *ServiceIntentionsConfigEntry) HasWildcardDestination() bool { - dstNS := e.EnterpriseMeta.NamespaceOrDefault() + dstNS := e.NamespaceOrDefault() return dstNS == WildcardSpecifier || e.Name == WildcardSpecifier } diff --git a/agent/structs/config_entry_routes.go b/agent/structs/config_entry_routes.go index 7b5ac179aab0..32fab44736a5 100644 --- a/agent/structs/config_entry_routes.go +++ b/agent/structs/config_entry_routes.go @@ -95,8 +95,8 @@ func (e *HTTPRouteConfigEntry) Normalize() error { if parent.Kind == "" { parent.Kind = APIGateway } - parent.EnterpriseMeta.Merge(e.GetEnterpriseMeta()) - parent.EnterpriseMeta.Normalize() + parent.Merge(e.GetEnterpriseMeta()) + parent.Normalize() e.Parents[i] = parent } @@ -121,8 +121,8 @@ func (e *HTTPRouteConfigEntry) Normalize() error { } func (e *HTTPRouteConfigEntry) normalizeHTTPService(service HTTPService) HTTPService { - service.EnterpriseMeta.Merge(e.GetEnterpriseMeta()) - service.EnterpriseMeta.Normalize() + service.Merge(e.GetEnterpriseMeta()) + service.Normalize() if service.Weight <= 0 { service.Weight = 1 } @@ -605,14 +605,14 @@ func (e *TCPRouteConfigEntry) Normalize() error { if parent.Kind == "" { parent.Kind = APIGateway } - parent.EnterpriseMeta.Merge(e.GetEnterpriseMeta()) - parent.EnterpriseMeta.Normalize() + parent.Merge(e.GetEnterpriseMeta()) + parent.Normalize() e.Parents[i] = parent } for i, service := range e.Services { - service.EnterpriseMeta.Merge(e.GetEnterpriseMeta()) - service.EnterpriseMeta.Normalize() + service.Merge(e.GetEnterpriseMeta()) + service.Normalize() e.Services[i] = service } diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 5fa7b0771549..3856a5b6d1c2 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -247,9 +247,7 @@ type IssuedCert struct { } func (i *IssuedCert) Key() string { - return fmt.Sprintf("%s", - i.SerialNumber, - ) + return i.SerialNumber } // CAOp is the operation for a request related to intentions. 
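The copy loops removed in these hunks (the certificate DNS names and the intention header clone) are covered by variadic append, which appends every element of one slice to another in a single call. An illustrative comparison, not tied to the real types:

package main

import "fmt"

func main() {
	hosts := []string{"example.internal"}
	dnsNames := []string{"a.example.internal", "b.example.internal"}

	// Element-by-element loop and the variadic call produce the same result;
	// the patch standardizes on the single append.
	for _, name := range dnsNames {
		hosts = append(hosts, name)
	}
	hosts = hosts[:1] // reset before showing the equivalent one-liner
	hosts = append(hosts, dnsNames...)

	fmt.Println(hosts)
}

The IssuedCert.Key change at the end of this span is similar in spirit: fmt.Sprintf("%s", s) on a value that is already a string can simply return the string.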
@@ -631,8 +629,6 @@ func ParseDurationFunc() mapstructure.DecodeHookFunc { func Uint8ToString(bs []uint8) string { b := make([]byte, len(bs)) - for i, v := range bs { - b[i] = v - } + copy(b, bs) return string(b) } diff --git a/agent/structs/connect_proxy_config.go b/agent/structs/connect_proxy_config.go index 3bd5276f8279..5c82aac82419 100644 --- a/agent/structs/connect_proxy_config.go +++ b/agent/structs/connect_proxy_config.go @@ -373,7 +373,7 @@ func (c *ConnectProxyConfig) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - out.Alias.Config = proxyConfig + out.Config = proxyConfig if !c.TransparentProxy.IsZero() { out.TransparentProxy = &out.Alias.TransparentProxy @@ -718,9 +718,7 @@ func (e ExposeConfig) Clone() ExposeConfig { e2 := e if len(e.Paths) > 0 { e2.Paths = make([]ExposePath, 0, len(e.Paths)) - for _, p := range e.Paths { - e2.Paths = append(e2.Paths, p) - } + e2.Paths = append(e2.Paths, e.Paths...) } return e2 } diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 95e9d8388aae..5162d87c2cdd 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -669,7 +669,7 @@ func (q *IntentionQueryRequest) CacheInfo() cache.RequestInfo { Check: q.Check, Match: q.Match, Exact: q.Exact, - Filter: q.QueryOptions.Filter, + Filter: q.Filter, }, nil) if err == nil { // If there is an error, we don't set the key. A blank key forces diff --git a/agent/structs/service_definition.go b/agent/structs/service_definition.go index 9b9fec89e954..4beb41d1ced5 100644 --- a/agent/structs/service_definition.go +++ b/agent/structs/service_definition.go @@ -82,7 +82,7 @@ func (s *ServiceDefinition) NodeService() *NodeService { EnterpriseMeta: s.EnterpriseMeta, Locality: s.Locality, } - ns.EnterpriseMeta.Normalize() + ns.Normalize() if s.Connect != nil { ns.Connect = *s.Connect @@ -98,10 +98,10 @@ func (s *ServiceDefinition) NodeService() *NodeService { // If a proxy's namespace and partition are not defined, inherit from the proxied service // Applicable only to Consul Enterprise. if ns.Proxy.Upstreams[i].DestinationNamespace == "" { - ns.Proxy.Upstreams[i].DestinationNamespace = ns.EnterpriseMeta.NamespaceOrEmpty() + ns.Proxy.Upstreams[i].DestinationNamespace = ns.NamespaceOrEmpty() } if ns.Proxy.Upstreams[i].DestinationPartition == "" { - ns.Proxy.Upstreams[i].DestinationPartition = ns.EnterpriseMeta.PartitionOrEmpty() + ns.Proxy.Upstreams[i].DestinationPartition = ns.PartitionOrEmpty() } } ns.Proxy.Expose = s.Proxy.Expose diff --git a/agent/structs/structs.go b/agent/structs/structs.go index d2c7e0eaa29c..d62e9728d6f7 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -658,7 +658,7 @@ func (r *DCSpecificRequest) CacheInfo() cache.RequestInfo { } func (r *DCSpecificRequest) CacheMinIndex() uint64 { - return r.QueryOptions.MinQueryIndex + return r.MinQueryIndex } type OperatorUsageRequest struct { @@ -719,7 +719,7 @@ func (r *ServiceDumpRequest) CacheInfo() cache.RequestInfo { } func (r *ServiceDumpRequest) CacheMinIndex() uint64 { - return r.QueryOptions.MinQueryIndex + return r.MinQueryIndex } // PartitionSpecificRequest is used to query about a specific partition. 
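Uint8ToString above drops its element-by-element loop in favor of the built-in copy, which copies min(len(dst), len(src)) elements; since []uint8 and []byte are the same type, a direct string conversion would also work. A runnable version of the simplified helper:

package main

import "fmt"

// uint8ToString mirrors the simplified helper: copy replaces the manual loop,
// and string(bs) would be an equally valid shortcut.
func uint8ToString(bs []uint8) string {
	b := make([]byte, len(bs))
	copy(b, bs)
	return string(b)
}

func main() {
	fmt.Println(uint8ToString([]uint8{'o', 'k'}))
}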
@@ -742,7 +742,7 @@ func (r *PartitionSpecificRequest) CacheInfo() cache.RequestInfo { Timeout: r.MaxQueryTime, MaxAge: r.MaxAge, MustRevalidate: r.MustRevalidate, - Key: r.EnterpriseMeta.PartitionOrDefault(), + Key: r.PartitionOrDefault(), } } @@ -841,7 +841,7 @@ func (r *ServiceSpecificRequest) CacheInfo() cache.RequestInfo { } func (r *ServiceSpecificRequest) CacheMinIndex() uint64 { - return r.QueryOptions.MinQueryIndex + return r.MinQueryIndex } // NodeSpecificRequest is used to request the information about a single node @@ -1751,7 +1751,7 @@ func (s *ServiceNode) IsSameService(other *ServiceNode) bool { s.ServiceEnableTagOverride != other.ServiceEnableTagOverride || !reflect.DeepEqual(s.ServiceProxy, other.ServiceProxy) || !reflect.DeepEqual(s.ServiceConnect, other.ServiceConnect) || - !s.EnterpriseMeta.IsSame(&other.EnterpriseMeta) { + !s.IsSame(&other.EnterpriseMeta) { return false } @@ -2285,7 +2285,7 @@ func NewCheckID(id types.CheckID, entMeta *acl.EnterpriseMeta) CheckID { } cid.EnterpriseMeta = *entMeta - cid.EnterpriseMeta.Normalize() + cid.Normalize() return cid } @@ -2295,7 +2295,7 @@ func NewCheckID(id types.CheckID, entMeta *acl.EnterpriseMeta) CheckID { func (cid CheckID) StringHashMD5() string { hasher := md5.New() hasher.Write([]byte(cid.ID)) - cid.EnterpriseMeta.AddToHash(hasher, true) + cid.AddToHash(hasher, true) return fmt.Sprintf("%x", hasher.Sum(nil)) } @@ -2304,7 +2304,7 @@ func (cid CheckID) StringHashMD5() string { func (cid CheckID) StringHashSHA256() string { hasher := sha256.New() hasher.Write([]byte(cid.ID)) - cid.EnterpriseMeta.AddToHash(hasher, true) + cid.AddToHash(hasher, true) return fmt.Sprintf("%x", hasher.Sum(nil)) } @@ -2321,7 +2321,7 @@ func NewServiceID(id string, entMeta *acl.EnterpriseMeta) ServiceID { } sid.EnterpriseMeta = *entMeta - sid.EnterpriseMeta.Normalize() + sid.Normalize() return sid } @@ -2334,7 +2334,7 @@ func (sid ServiceID) Matches(other ServiceID) bool { func (sid ServiceID) StringHashSHA256() string { hasher := sha256.New() hasher.Write([]byte(sid.ID)) - sid.EnterpriseMeta.AddToHash(hasher, true) + sid.AddToHash(hasher, true) return fmt.Sprintf("%x", hasher.Sum(nil)) } @@ -2395,7 +2395,7 @@ func NewServiceName(name string, entMeta *acl.EnterpriseMeta) ServiceName { } ret.EnterpriseMeta = *entMeta - ret.EnterpriseMeta.Normalize() + ret.Normalize() return ret } diff --git a/agent/structs/structs_test.go b/agent/structs/structs_test.go index 3f73e0cc287a..6e4dabc2e1e8 100644 --- a/agent/structs/structs_test.go +++ b/agent/structs/structs_test.go @@ -2808,8 +2808,8 @@ func makeFrank() *frankensteinStruct { } func makeMonster() *monsterStruct { - var d time.Duration = 9 * time.Hour - var t time.Time = time.Date(2008, 1, 2, 3, 4, 5, 0, time.UTC) + var d = 9 * time.Hour + var t = time.Date(2008, 1, 2, 3, 4, 5, 0, time.UTC) return &monsterStruct{ Bool: true, diff --git a/agent/testagent.go b/agent/testagent.go index 5f0225c42579..4ad36d7b44e3 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -278,9 +278,9 @@ func (a *TestAgent) Start(t testutil.TestingTB) error { a.Agent = agent // Start the anti-entropy syncer - a.Agent.StartSync() + a.StartSync() - a.srv = a.Agent.httpHandlers + a.srv = a.httpHandlers if err := a.waitForUp(); err != nil { a.Shutdown() @@ -328,7 +328,7 @@ func (a *TestAgent) waitForUp() error { retErr = fmt.Errorf("Catalog.ListNodes failed: %v", err) continue // fail, try again } - if !out.QueryMeta.KnownLeader { + if !out.KnownLeader { retErr = fmt.Errorf("No leader") continue // fail, try again } @@ 
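The makeMonster change is a declaration-style fix: the explicit time.Duration and time.Time annotations only restate what the initializers already imply, so type inference yields identical variables. For example:

package main

import (
	"fmt"
	"time"
)

func main() {
	// The right-hand sides already have types time.Duration and time.Time,
	// so the annotations the patch removes added nothing.
	var d = 9 * time.Hour
	var ts = time.Date(2008, 1, 2, 3, 4, 5, 0, time.UTC)
	fmt.Println(d, ts.Year())
}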
-359,7 +359,7 @@ func (a *TestAgent) waitForUp() error { func (a *TestAgent) isACLBootstrapped() (bool, error) { if a.config.ACLInitialManagementToken == "" { - logger := a.Agent.logger.Named("test") + logger := a.logger.Named("test") logger.Warn("Skipping check for ACL bootstrapping") return true, nil // We lie because we can't check. @@ -409,11 +409,11 @@ func (a *TestAgent) Shutdown() error { } // shutdown agent before endpoints - defer a.Agent.ShutdownEndpoints() - if err := a.Agent.ShutdownAgent(); err != nil { + defer a.ShutdownEndpoints() + if err := a.ShutdownAgent(); err != nil { return err } - <-a.Agent.ShutdownCh() + <-a.ShutdownCh() return nil } @@ -425,7 +425,7 @@ func (a *TestAgent) DNSAddr() string { } func (a *TestAgent) HTTPAddr() string { - addr, err := firstAddr(a.Agent.apiServers, "http") + addr, err := firstAddr(a.apiServers, "http") if err != nil { // TODO: t.Fatal instead of panic panic("no http server registered") @@ -445,7 +445,7 @@ func firstAddr(s *apiServers, protocol string) (net.Addr, error) { } func (a *TestAgent) SegmentAddr(name string) string { - if server, ok := a.Agent.delegate.(*consul.Server); ok { + if server, ok := a.delegate.(*consul.Server); ok { return server.LANSegmentAddr(name) } return "" @@ -473,7 +473,7 @@ func (a *TestAgent) DNSDisableCompression(b bool) { // TODO: rename to newConsulConfig // TODO: remove TestAgent receiver, accept a.Agent.config as an arg func (a *TestAgent) consulConfig() *consul.Config { - c, err := newConsulConfig(a.Agent.config, a.Agent.logger) + c, err := newConsulConfig(a.config, a.logger) if err != nil { panic(err) } diff --git a/agent/ui_endpoint_test.go b/agent/ui_endpoint_test.go index 2df3cb784989..4055c19fb677 100644 --- a/agent/ui_endpoint_test.go +++ b/agent/ui_endpoint_test.go @@ -2678,13 +2678,13 @@ func TestUIEndpoint_MetricsProxy(t *testing.T) { t.Run(tc.name, func(t *testing.T) { // Reload the agent config with the desired UI config by making a copy and // using internal reload. 
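The testagent.go hunks rely on the fact that promotion also works through an embedded pointer: when an outer type embeds *Agent, calling the method on the outer value and on the embedded pointer is the same thing. A toy mirror of that shape, not the real test harness:

package main

import "fmt"

// Agent and TestAgent here only imitate the structure of the harness.
type Agent struct{ started bool }

func (a *Agent) StartSync() { a.started = true }

type TestAgent struct {
	*Agent // embedded pointer: its methods and fields are promoted
}

func main() {
	a := TestAgent{Agent: &Agent{}}
	a.StartSync() // equivalent to a.Agent.StartSync()
	fmt.Println(a.started)
}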
- cfg := *a.Agent.config + cfg := *a.config // Modify the UIConfig part (this is a copy remember and that struct is // not a pointer) cfg.UIConfig.MetricsProxy = tc.config - require.NoError(t, a.Agent.reloadConfigInternal(&cfg)) + require.NoError(t, a.reloadConfigInternal(&cfg)) // Now fetch the API handler to run requests against a.enableDebug.Store(true) diff --git a/agent/util.go b/agent/util.go index 1bcdbca01f1a..a2f4e4fa470d 100644 --- a/agent/util.go +++ b/agent/util.go @@ -113,21 +113,21 @@ func (d durationFixer) FixupDurations(raw interface{}) error { return nil } for key, val := range rawMap { - switch val.(type) { + switch val := val.(type) { case map[string]interface{}: if err := d.FixupDurations(val); err != nil { return err } case []interface{}: - for _, v := range val.([]interface{}) { + for _, v := range val { if err := d.FixupDurations(v); err != nil { return err } } case []map[string]interface{}: - for _, v := range val.([]map[string]interface{}) { + for _, v := range val { if err := d.FixupDurations(v); err != nil { return err } diff --git a/agent/xds/clusters.go b/agent/xds/clusters.go index 791773f8c4a2..028013353e58 100644 --- a/agent/xds/clusters.go +++ b/agent/xds/clusters.go @@ -864,7 +864,7 @@ func (s *ResourceGenerator) makeDestinationClusters(cfgSnap *proxycfg.ConfigSnap clusters := make([]proto.Message, 0, len(cfgSnap.TerminatingGateway.DestinationServices)) for _, svcName := range cfgSnap.TerminatingGateway.ValidDestinations() { - svcConfig, _ := serviceConfigs[svcName] + svcConfig := serviceConfigs[svcName] dest := svcConfig.Destination for _, address := range dest.Addresses { @@ -1761,10 +1761,7 @@ func (s *ResourceGenerator) makeGatewayCluster(snap *proxycfg.ConfigSnapshot, op OutlierDetection: &envoy_cluster_v3.OutlierDetection{}, } - useEDS := true - if len(opts.hostnameEndpoints) > 0 { - useEDS = false - } + useEDS := !(len(opts.hostnameEndpoints) > 0) // TCP keepalive settings can be enabled for terminating gateway upstreams or remote mesh gateways. remoteUpstream := opts.isRemote || snap.Kind == structs.ServiceKindTerminatingGateway diff --git a/agent/xds/delta_envoy_extender_ce_test.go b/agent/xds/delta_envoy_extender_ce_test.go index acd18700394a..1395c9310681 100644 --- a/agent/xds/delta_envoy_extender_ce_test.go +++ b/agent/xds/delta_envoy_extender_ce_test.go @@ -42,10 +42,7 @@ func TestEnvoyExtenderWithSnapshot(t *testing.T) { // Otherwise payload-passthrough=false and invocation-mode=synchronous. // This is used to test all the permutations. 
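The util.go hunk binds the switched value, which gives each case a variable that already has the case's concrete type and removes the repeated assertions; the clusters.go hunks in the same span are the same family of cleanups (no blank second variable on a map read whose ok value is unused, and a bool initialized straight from its condition). A sketch of the bound type switch:

package main

import "fmt"

func describe(raw interface{}) {
	// Binding the switched value means each case sees a variable of the
	// case's concrete type, so no further assertions or conversions are needed.
	switch v := raw.(type) {
	case map[string]interface{}:
		fmt.Println("map with", len(v), "keys")
	case []interface{}:
		fmt.Println("slice with", len(v), "elements")
	default:
		fmt.Printf("other: %T\n", v)
	}
}

func main() {
	describe(map[string]interface{}{"a": 1})
	describe([]interface{}{1, 2, 3})
	describe(42)
}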
makeLambdaServiceDefaults := func(opposite bool) *structs.ServiceConfigEntry { - payloadPassthrough := true - if opposite { - payloadPassthrough = false - } + payloadPassthrough := !opposite invocationMode := "synchronous" if opposite { diff --git a/agent/xds/delta_test.go b/agent/xds/delta_test.go index ab170f9d66b9..643cb3b56baa 100644 --- a/agent/xds/delta_test.go +++ b/agent/xds/delta_test.go @@ -186,8 +186,8 @@ func TestServer_DeltaAggregatedResources_v3_BasicProtocol_TCP(t *testing.T) { }) deleteAllButOneEndpoint := func(snap *proxycfg.ConfigSnapshot, uid proxycfg.UpstreamID, targetID string) { - snap.ConnectProxy.ConfigSnapshotUpstreams.WatchedUpstreamEndpoints[uid][targetID] = - snap.ConnectProxy.ConfigSnapshotUpstreams.WatchedUpstreamEndpoints[uid][targetID][0:1] + snap.ConnectProxy.WatchedUpstreamEndpoints[uid][targetID] = + snap.ConnectProxy.WatchedUpstreamEndpoints[uid][targetID][0:1] } testutil.RunStep(t, "avoid sending config for unsubscribed resource", func(t *testing.T) { diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index 5b01bdc4dfe2..449f87c8b6e6 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -1707,8 +1707,7 @@ func (s *ResourceGenerator) makeTerminatingGatewayListener( ) } - var dest *structs.DestinationConfig - dest = &svcConfig.Destination + var dest *structs.DestinationConfig = &svcConfig.Destination opts := terminatingGatewayFilterChainOpts{ service: svc, @@ -2467,7 +2466,7 @@ func makeStatPrefix(prefix, filterName string) string { // Replace colons here because Envoy does that in the metrics for the actual // clusters but doesn't in the stat prefix here while dashboards assume they // will match. - return fmt.Sprintf("%s%s", prefix, strings.Replace(filterName, ":", "_", -1)) + return fmt.Sprintf("%s%s", prefix, strings.ReplaceAll(filterName, ":", "_")) } func makeTracingFromUserConfig(configJSON string) (*envoy_http_v3.HttpConnectionManager_Tracing, error) { diff --git a/agent/xds/testing.go b/agent/xds/testing.go index 6d90005cd78f..c18a6591ee38 100644 --- a/agent/xds/testing.go +++ b/agent/xds/testing.go @@ -42,7 +42,7 @@ func NewTestADSDeltaStream(t testing.T, ctx context.Context) *TestADSDeltaStream sendCh: make(chan *envoy_discovery_v3.DeltaDiscoveryResponse, 1), recvCh: make(chan *envoy_discovery_v3.DeltaDiscoveryRequest, 1), } - s.stubGrpcServerStream.ctx = ctx + s.ctx = ctx return s } diff --git a/agent/xds/xds_protocol_helpers_test.go b/agent/xds/xds_protocol_helpers_test.go index ff2fbdf38614..fb79dbfe9883 100644 --- a/agent/xds/xds_protocol_helpers_test.go +++ b/agent/xds/xds_protocol_helpers_test.go @@ -62,7 +62,7 @@ func newTestSnapshot( if dbServiceProtocol != "" { // Simulate ServiceManager injection of protocol snap.Proxy.Upstreams[0].Config["protocol"] = dbServiceProtocol - snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamConfig = proxycfg.UpstreamsToMap(snap.Proxy.Upstreams) + snap.ConnectProxy.UpstreamConfig = proxycfg.UpstreamsToMap(snap.Proxy.Upstreams) } return snap } diff --git a/command/agent/agent_test.go b/command/agent/agent_test.go index 3fa5d1e1a31a..735b2a7bcd0b 100644 --- a/command/agent/agent_test.go +++ b/command/agent/agent_test.go @@ -223,11 +223,11 @@ type captureUI struct { } func (c *captureUI) Stdout() io.Writer { - return c.MockUi.OutputWriter + return c.OutputWriter } func (c *captureUI) Stderr() io.Writer { - return c.MockUi.ErrorWriter + return c.ErrorWriter } func (c *captureUI) HeaderOutput(s string) { diff --git a/command/catalog/list/nodes/catalog_list_nodes.go 
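makeStatPrefix swaps strings.Replace with a count of -1 for strings.ReplaceAll; with -1 every occurrence is replaced, so the two calls are interchangeable. The lambda test at the top of this span is the same kind of condensation, initializing the flag directly from !opposite. A quick comparison:

package main

import (
	"fmt"
	"strings"
)

func main() {
	name := "listener:tcp:8080"
	// With a count of -1, strings.Replace substitutes every occurrence,
	// which is exactly what strings.ReplaceAll expresses.
	fmt.Println(strings.Replace(name, ":", "_", -1))
	fmt.Println(strings.ReplaceAll(name, ":", "_"))
}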
b/command/catalog/list/nodes/catalog_list_nodes.go index 4388d753f77f..dc20abca9c2f 100644 --- a/command/catalog/list/nodes/catalog_list_nodes.go +++ b/command/catalog/list/nodes/catalog_list_nodes.go @@ -144,7 +144,7 @@ func (c *cmd) Run(args []string) int { // printNodes accepts a list of nodes and prints information in a tabular // format about the nodes. func printNodes(nodes []*api.Node, detailed bool) (string, error) { - var result []string = detailedNodes(nodes, detailed) + var result = detailedNodes(nodes, detailed) return columnize.Format(result, &columnize.Config{Delim: string([]byte{0x1f})}), nil } From 19de71d05d2f0a7d127dfa1539b386c878b87ba9 Mon Sep 17 00:00:00 2001 From: Sreeram Narayanan Date: Thu, 28 Aug 2025 13:58:18 +0530 Subject: [PATCH 2/3] fix: missing braces for else in mesh_gateway.go --- agent/proxycfg/mesh_gateway.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/agent/proxycfg/mesh_gateway.go b/agent/proxycfg/mesh_gateway.go index 5c6a2f9174b1..1e954bd4fcfc 100644 --- a/agent/proxycfg/mesh_gateway.go +++ b/agent/proxycfg/mesh_gateway.go @@ -742,7 +742,9 @@ func (s *handlerMeshGateway) handleUpdate(ctx context.Context, u UpdateEvent, sn if len(resp.Nodes) > 0 { snap.MeshGateway.ServiceGroups[sn] = resp.Nodes - } else delete(snap.MeshGateway.ServiceGroups, sn) + } else { + delete(snap.MeshGateway.ServiceGroups, sn) + } case strings.HasPrefix(u.CorrelationID, "peering-connect-service:"): resp, ok := u.Result.(*structs.IndexedCheckServiceNodes) From f3992e066473546a4b8ec74839ee08f3ea63506e Mon Sep 17 00:00:00 2001 From: Sreeram Narayanan Date: Thu, 28 Aug 2025 14:15:40 +0530 Subject: [PATCH 3/3] fix: remove unnecessary type check in otel_sink_test.go --- agent/hcp/telemetry/otel_sink_test.go | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/agent/hcp/telemetry/otel_sink_test.go b/agent/hcp/telemetry/otel_sink_test.go index f6ae2ab4a545..f282ddad59f4 100644 --- a/agent/hcp/telemetry/otel_sink_test.go +++ b/agent/hcp/telemetry/otel_sink_test.go @@ -398,7 +398,7 @@ func TestOTELSink_Race(t *testing.T) { wg.Add(1) go func(k string, v metricdata.Metrics) { defer wg.Done() - performSinkOperation(sink, k, v, errCh) + performSinkOperation(sink, k, v) }(k, v) } wg.Wait() @@ -481,28 +481,16 @@ func generateSamples(n int, labels map[string]string) map[string]metricdata.Metr } // performSinkOperation emits a measurement using the OTELSink and calls wg.Done() when completed. -func performSinkOperation(sink *OTELSink, k string, v metricdata.Metrics, errCh chan error) { +func performSinkOperation(sink *OTELSink, k string, v metricdata.Metrics) { key := strings.Split(k, ".") data := v.Data switch data := data.(type) { case metricdata.Gauge[float64]: - gauge, ok := data - if !ok { - errCh <- fmt.Errorf("unexpected type assertion error for key: %s", key) - } - sink.SetGauge(key, float32(gauge.DataPoints[0].Value)) + sink.SetGauge(key, float32(data.DataPoints[0].Value)) case metricdata.Sum[float64]: - sum, ok := data - if !ok { - errCh <- fmt.Errorf("unexpected type assertion error for key: %s", key) - } - sink.IncrCounter(key, float32(sum.DataPoints[0].Value)) + sink.IncrCounter(key, float32(data.DataPoints[0].Value)) case metricdata.Histogram[float64]: - hist, ok := data - if !ok { - errCh <- fmt.Errorf("unexpected type assertion error for key: %s", key) - } - sink.AddSample(key, float32(hist.DataPoints[0].Sum)) + sink.AddSample(key, float32(data.DataPoints[0].Sum)) } }
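The second patch is purely syntactic: Go's grammar only allows a block or another if after else, and the else must share a line with the closing brace, so the braced form is what the compiler accepts. The third patch leans on the bound type switch shown earlier, where each case variable already carries the concrete type. A small example of the accepted else shape, with made-up data:

package main

import "fmt"

func main() {
	groups := map[string][]string{"web": {"node1"}}
	nodes := []string{}

	// else must introduce a block (or another if) and sit on the same line
	// as the closing brace; a bare statement after else is rejected.
	if len(nodes) > 0 {
		groups["web"] = nodes
	} else {
		delete(groups, "web")
	}
	fmt.Println(groups)
}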